summaryrefslogtreecommitdiffstats
path: root/drivers/mtd/spi-nor
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-06 01:02:30 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-06 01:02:30 +0000
commit76cb841cb886eef6b3bee341a2266c76578724ad (patch)
treef5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /drivers/mtd/spi-nor
parentInitial commit. (diff)
downloadlinux-upstream.tar.xz
linux-upstream.zip
Adding upstream version 4.19.249.upstream/4.19.249upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/mtd/spi-nor')
-rw-r--r--drivers/mtd/spi-nor/Kconfig132
-rw-r--r--drivers/mtd/spi-nor/Makefile13
-rw-r--r--drivers/mtd/spi-nor/aspeed-smc.c907
-rw-r--r--drivers/mtd/spi-nor/atmel-quadspi.c781
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c1493
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c1224
-rw-r--r--drivers/mtd/spi-nor/hisi-sfc.c508
-rw-r--r--drivers/mtd/spi-nor/intel-spi-pci.c86
-rw-r--r--drivers/mtd/spi-nor/intel-spi-platform.c57
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c935
-rw-r--r--drivers/mtd/spi-nor/intel-spi.h24
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c569
-rw-r--r--drivers/mtd/spi-nor/nxp-spifi.c483
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c3013
-rw-r--r--drivers/mtd/spi-nor/stm32-quadspi.c720
15 files changed, 10945 insertions, 0 deletions
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
new file mode 100644
index 000000000..37775fc09
--- /dev/null
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -0,0 +1,132 @@
+menuconfig MTD_SPI_NOR
+ tristate "SPI-NOR device support"
+ depends on MTD
+ help
+ This is the framework for the SPI NOR which can be used by the SPI
+ device drivers and the SPI-NOR device driver.
+
+if MTD_SPI_NOR
+
+config MTD_MT81xx_NOR
+ tristate "Mediatek MT81xx SPI NOR flash controller"
+ depends on HAS_IOMEM
+ help
+ This enables access to SPI NOR flash, using MT81xx SPI NOR flash
+ controller. This controller does not support generic SPI BUS, it only
+ supports SPI NOR Flash.
+
+config MTD_SPI_NOR_USE_4K_SECTORS
+ bool "Use small 4096 B erase sectors"
+ default y
+ help
+ Many flash memories support erasing small (4096 B) sectors. Depending
+ on the usage this feature may provide performance gain in comparison
+ to erasing whole blocks (32/64 KiB).
+ Changing a small part of the flash's contents is usually faster with
+ small sectors. On the other hand erasing should be faster when using
+ 64 KiB block instead of 16 × 4 KiB sectors.
+
+ Please note that some tools/drivers/filesystems may not work with
+ 4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum).
+
+config SPI_ASPEED_SMC
+ tristate "Aspeed flash controllers in SPI mode"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on HAS_IOMEM && OF
+ help
+ This enables support for the Firmware Memory controller (FMC)
+ in the Aspeed AST2500/AST2400 SoCs when attached to SPI NOR chips,
+ and support for the SPI flash memory controller (SPI) for
+ the host firmware. The implementation only supports SPI NOR.
+
+config SPI_ATMEL_QUADSPI
+ tristate "Atmel Quad SPI Controller"
+ depends on ARCH_AT91 || (ARM && COMPILE_TEST && !ARCH_EBSA110)
+ depends on OF && HAS_IOMEM
+ help
+ This enables support for the Quad SPI controller in master mode.
+ This driver does not support generic SPI. The implementation only
+ supports SPI NOR.
+
+config SPI_CADENCE_QUADSPI
+ tristate "Cadence Quad SPI controller"
+ depends on OF && (ARM || ARM64 || COMPILE_TEST)
+ help
+ Enable support for the Cadence Quad SPI Flash controller.
+
+ Cadence QSPI is a specialized controller for connecting an SPI
+ Flash over 1/2/4-bit wide bus. Enable this option if you have a
+ device with a Cadence QSPI controller and want to access the
+ Flash as an MTD device.
+
+config SPI_FSL_QUADSPI
+ tristate "Freescale Quad SPI controller"
+ depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables support for the Quad SPI controller in master mode.
+ This controller does not support generic SPI. It only supports
+ SPI NOR.
+
+config SPI_HISI_SFC
+ tristate "Hisilicon SPI-NOR Flash Controller(SFC)"
+ depends on ARCH_HISI || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables support for hisilicon SPI-NOR flash controller.
+
+config SPI_NXP_SPIFI
+ tristate "NXP SPI Flash Interface (SPIFI)"
+ depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
+ depends on HAS_IOMEM
+ help
+ Enable support for the NXP LPC SPI Flash Interface controller.
+
+ SPIFI is a specialized controller for connecting serial SPI
+ Flash. Enable this option if you have a device with a SPIFI
+ controller and want to access the Flash as a mtd device.
+
+config SPI_INTEL_SPI
+ tristate
+
+config SPI_INTEL_SPI_PCI
+ tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
+ depends on X86 && PCI
+ select SPI_INTEL_SPI
+ help
+ This enables PCI support for the Intel PCH/PCU SPI controller in
+ master mode. This controller is present in modern Intel hardware
+ and is used to hold BIOS and other persistent settings. Using
+ this driver it is possible to upgrade BIOS directly from Linux.
+
+ Say N here unless you know what you are doing. Overwriting the
+ SPI flash may render the system unbootable.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-spi-pci.
+
+config SPI_INTEL_SPI_PLATFORM
+ tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)"
+ depends on X86
+ select SPI_INTEL_SPI
+ help
+ This enables platform support for the Intel PCH/PCU SPI
+ controller in master mode. This controller is present in modern
+ Intel hardware and is used to hold BIOS and other persistent
+ settings. Using this driver it is possible to upgrade BIOS
+ directly from Linux.
+
+ Say N here unless you know what you are doing. Overwriting the
+ SPI flash may render the system unbootable.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-spi-platform.
+
+config SPI_STM32_QUADSPI
+ tristate "STM32 Quad SPI controller"
+ depends on ARCH_STM32 || COMPILE_TEST
+ help
+ This enables support for the STM32 Quad SPI controller.
+ We only connect the NOR to this controller.
+
+endif # MTD_SPI_NOR
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
new file mode 100644
index 000000000..f4c61d282
--- /dev/null
+++ b/drivers/mtd/spi-nor/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
+obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o
+obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o
+obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o
+obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
+obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
+obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o
+obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
+obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
+obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o
+obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o
+obj-$(CONFIG_SPI_STM32_QUADSPI) += stm32-quadspi.o
diff --git a/drivers/mtd/spi-nor/aspeed-smc.c b/drivers/mtd/spi-nor/aspeed-smc.c
new file mode 100644
index 000000000..95e54468c
--- /dev/null
+++ b/drivers/mtd/spi-nor/aspeed-smc.c
@@ -0,0 +1,907 @@
+/*
+ * ASPEED Static Memory Controller driver
+ *
+ * Copyright (c) 2015-2016, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/sizes.h>
+#include <linux/sysfs.h>
+
+#define DEVICE_NAME "aspeed-smc"
+
+/*
+ * The driver only support SPI flash
+ */
+enum aspeed_smc_flash_type {
+ smc_type_nor = 0,
+ smc_type_nand = 1,
+ smc_type_spi = 2,
+};
+
+struct aspeed_smc_chip;
+
+struct aspeed_smc_info {
+ u32 maxsize; /* maximum size of chip window */
+ u8 nce; /* number of chip enables */
+ bool hastype; /* flash type field exists in config reg */
+ u8 we0; /* shift for write enable bit for CE0 */
+ u8 ctl0; /* offset in regs of ctl for CE0 */
+
+ void (*set_4b)(struct aspeed_smc_chip *chip);
+};
+
+static void aspeed_smc_chip_set_4b_spi_2400(struct aspeed_smc_chip *chip);
+static void aspeed_smc_chip_set_4b(struct aspeed_smc_chip *chip);
+
+static const struct aspeed_smc_info fmc_2400_info = {
+ .maxsize = 64 * 1024 * 1024,
+ .nce = 5,
+ .hastype = true,
+ .we0 = 16,
+ .ctl0 = 0x10,
+ .set_4b = aspeed_smc_chip_set_4b,
+};
+
+static const struct aspeed_smc_info spi_2400_info = {
+ .maxsize = 64 * 1024 * 1024,
+ .nce = 1,
+ .hastype = false,
+ .we0 = 0,
+ .ctl0 = 0x04,
+ .set_4b = aspeed_smc_chip_set_4b_spi_2400,
+};
+
+static const struct aspeed_smc_info fmc_2500_info = {
+ .maxsize = 256 * 1024 * 1024,
+ .nce = 3,
+ .hastype = true,
+ .we0 = 16,
+ .ctl0 = 0x10,
+ .set_4b = aspeed_smc_chip_set_4b,
+};
+
+static const struct aspeed_smc_info spi_2500_info = {
+ .maxsize = 128 * 1024 * 1024,
+ .nce = 2,
+ .hastype = false,
+ .we0 = 16,
+ .ctl0 = 0x10,
+ .set_4b = aspeed_smc_chip_set_4b,
+};
+
+enum aspeed_smc_ctl_reg_value {
+ smc_base, /* base value without mode for other commands */
+ smc_read, /* command reg for (maybe fast) reads */
+ smc_write, /* command reg for writes */
+ smc_max,
+};
+
+struct aspeed_smc_controller;
+
+struct aspeed_smc_chip {
+ int cs;
+ struct aspeed_smc_controller *controller;
+ void __iomem *ctl; /* control register */
+ void __iomem *ahb_base; /* base of chip window */
+ u32 ahb_window_size; /* chip mapping window size */
+ u32 ctl_val[smc_max]; /* control settings */
+ enum aspeed_smc_flash_type type; /* what type of flash */
+ struct spi_nor nor;
+};
+
+struct aspeed_smc_controller {
+ struct device *dev;
+
+ struct mutex mutex; /* controller access mutex */
+ const struct aspeed_smc_info *info; /* type info of controller */
+ void __iomem *regs; /* controller registers */
+ void __iomem *ahb_base; /* per-chip windows resource */
+ u32 ahb_window_size; /* full mapping window size */
+
+ struct aspeed_smc_chip *chips[0]; /* pointers to attached chips */
+};
+
+/*
+ * SPI Flash Configuration Register (AST2500 SPI)
+ * or
+ * Type setting Register (AST2500 FMC).
+ * CE0 and CE1 can only be of type SPI. CE2 can be of type NOR but the
+ * driver does not support it.
+ */
+#define CONFIG_REG 0x0
+#define CONFIG_DISABLE_LEGACY BIT(31) /* 1 */
+
+#define CONFIG_CE2_WRITE BIT(18)
+#define CONFIG_CE1_WRITE BIT(17)
+#define CONFIG_CE0_WRITE BIT(16)
+
+#define CONFIG_CE2_TYPE BIT(4) /* AST2500 FMC only */
+#define CONFIG_CE1_TYPE BIT(2) /* AST2500 FMC only */
+#define CONFIG_CE0_TYPE BIT(0) /* AST2500 FMC only */
+
+/*
+ * CE Control Register
+ */
+#define CE_CONTROL_REG 0x4
+
+/*
+ * CEx Control Register
+ */
+#define CONTROL_AAF_MODE BIT(31)
+#define CONTROL_IO_MODE_MASK GENMASK(30, 28)
+#define CONTROL_IO_DUAL_DATA BIT(29)
+#define CONTROL_IO_DUAL_ADDR_DATA (BIT(29) | BIT(28))
+#define CONTROL_IO_QUAD_DATA BIT(30)
+#define CONTROL_IO_QUAD_ADDR_DATA (BIT(30) | BIT(28))
+#define CONTROL_CE_INACTIVE_SHIFT 24
+#define CONTROL_CE_INACTIVE_MASK GENMASK(27, \
+ CONTROL_CE_INACTIVE_SHIFT)
+/* 0 = 16T ... 15 = 1T T=HCLK */
+#define CONTROL_COMMAND_SHIFT 16
+#define CONTROL_DUMMY_COMMAND_OUT BIT(15)
+#define CONTROL_IO_DUMMY_HI BIT(14)
+#define CONTROL_IO_DUMMY_HI_SHIFT 14
+#define CONTROL_CLK_DIV4 BIT(13) /* others */
+#define CONTROL_IO_ADDRESS_4B BIT(13) /* AST2400 SPI */
+#define CONTROL_RW_MERGE BIT(12)
+#define CONTROL_IO_DUMMY_LO_SHIFT 6
+#define CONTROL_IO_DUMMY_LO GENMASK(7, \
+ CONTROL_IO_DUMMY_LO_SHIFT)
+#define CONTROL_IO_DUMMY_MASK (CONTROL_IO_DUMMY_HI | \
+ CONTROL_IO_DUMMY_LO)
+#define CONTROL_IO_DUMMY_SET(dummy) \
+ (((((dummy) >> 2) & 0x1) << CONTROL_IO_DUMMY_HI_SHIFT) | \
+ (((dummy) & 0x3) << CONTROL_IO_DUMMY_LO_SHIFT))
+
+#define CONTROL_CLOCK_FREQ_SEL_SHIFT 8
+#define CONTROL_CLOCK_FREQ_SEL_MASK GENMASK(11, \
+ CONTROL_CLOCK_FREQ_SEL_SHIFT)
+#define CONTROL_LSB_FIRST BIT(5)
+#define CONTROL_CLOCK_MODE_3 BIT(4)
+#define CONTROL_IN_DUAL_DATA BIT(3)
+#define CONTROL_CE_STOP_ACTIVE_CONTROL BIT(2)
+#define CONTROL_COMMAND_MODE_MASK GENMASK(1, 0)
+#define CONTROL_COMMAND_MODE_NORMAL 0
+#define CONTROL_COMMAND_MODE_FREAD 1
+#define CONTROL_COMMAND_MODE_WRITE 2
+#define CONTROL_COMMAND_MODE_USER 3
+
+#define CONTROL_KEEP_MASK \
+ (CONTROL_AAF_MODE | CONTROL_CE_INACTIVE_MASK | CONTROL_CLK_DIV4 | \
+ CONTROL_CLOCK_FREQ_SEL_MASK | CONTROL_LSB_FIRST | CONTROL_CLOCK_MODE_3)
+
+/*
+ * The Segment Register uses a 8MB unit to encode the start address
+ * and the end address of the mapping window of a flash SPI slave :
+ *
+ * | byte 1 | byte 2 | byte 3 | byte 4 |
+ * +--------+--------+--------+--------+
+ * | end | start | 0 | 0 |
+ */
+#define SEGMENT_ADDR_REG0 0x30
+#define SEGMENT_ADDR_START(_r) ((((_r) >> 16) & 0xFF) << 23)
+#define SEGMENT_ADDR_END(_r) ((((_r) >> 24) & 0xFF) << 23)
+#define SEGMENT_ADDR_VALUE(start, end) \
+ (((((start) >> 23) & 0xFF) << 16) | ((((end) >> 23) & 0xFF) << 24))
+#define SEGMENT_ADDR_REG(controller, cs) \
+ ((controller)->regs + SEGMENT_ADDR_REG0 + (cs) * 4)
+
+/*
+ * In user mode all data bytes read or written to the chip decode address
+ * range are transferred to or from the SPI bus. The range is treated as a
+ * fifo of arbitratry 1, 2, or 4 byte width but each write has to be aligned
+ * to its size. The address within the multiple 8kB range is ignored when
+ * sending bytes to the SPI bus.
+ *
+ * On the arm architecture, as of Linux version 4.3, memcpy_fromio and
+ * memcpy_toio on little endian targets use the optimized memcpy routines
+ * that were designed for well behavied memory storage. These routines
+ * have a stutter if the source and destination are not both word aligned,
+ * once with a duplicate access to the source after aligning to the
+ * destination to a word boundary, and again with a duplicate access to
+ * the source when the final byte count is not word aligned.
+ *
+ * When writing or reading the fifo this stutter discards data or sends
+ * too much data to the fifo and can not be used by this driver.
+ *
+ * While the low level io string routines that implement the insl family do
+ * the desired accesses and memory increments, the cross architecture io
+ * macros make them essentially impossible to use on a memory mapped address
+ * instead of a a token from the call to iomap of an io port.
+ *
+ * These fifo routines use readl and friends to a constant io port and update
+ * the memory buffer pointer and count via explicit code. The final updates
+ * to len are optimistically suppressed.
+ */
+static int aspeed_smc_read_from_ahb(void *buf, void __iomem *src, size_t len)
+{
+ size_t offset = 0;
+
+ if (IS_ALIGNED((uintptr_t)src, sizeof(uintptr_t)) &&
+ IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
+ ioread32_rep(src, buf, len >> 2);
+ offset = len & ~0x3;
+ len -= offset;
+ }
+ ioread8_rep(src, (u8 *)buf + offset, len);
+ return 0;
+}
+
+static int aspeed_smc_write_to_ahb(void __iomem *dst, const void *buf,
+ size_t len)
+{
+ size_t offset = 0;
+
+ if (IS_ALIGNED((uintptr_t)dst, sizeof(uintptr_t)) &&
+ IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
+ iowrite32_rep(dst, buf, len >> 2);
+ offset = len & ~0x3;
+ len -= offset;
+ }
+ iowrite8_rep(dst, (const u8 *)buf + offset, len);
+ return 0;
+}
+
+static inline u32 aspeed_smc_chip_write_bit(struct aspeed_smc_chip *chip)
+{
+ return BIT(chip->controller->info->we0 + chip->cs);
+}
+
+static void aspeed_smc_chip_check_config(struct aspeed_smc_chip *chip)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ u32 reg;
+
+ reg = readl(controller->regs + CONFIG_REG);
+
+ if (reg & aspeed_smc_chip_write_bit(chip))
+ return;
+
+ dev_dbg(controller->dev, "config write is not set ! @%p: 0x%08x\n",
+ controller->regs + CONFIG_REG, reg);
+ reg |= aspeed_smc_chip_write_bit(chip);
+ writel(reg, controller->regs + CONFIG_REG);
+}
+
+static void aspeed_smc_start_user(struct spi_nor *nor)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+ u32 ctl = chip->ctl_val[smc_base];
+
+ /*
+ * When the chip is controlled in user mode, we need write
+ * access to send the opcodes to it. So check the config.
+ */
+ aspeed_smc_chip_check_config(chip);
+
+ ctl |= CONTROL_COMMAND_MODE_USER |
+ CONTROL_CE_STOP_ACTIVE_CONTROL;
+ writel(ctl, chip->ctl);
+
+ ctl &= ~CONTROL_CE_STOP_ACTIVE_CONTROL;
+ writel(ctl, chip->ctl);
+}
+
+static void aspeed_smc_stop_user(struct spi_nor *nor)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+
+ u32 ctl = chip->ctl_val[smc_read];
+ u32 ctl2 = ctl | CONTROL_COMMAND_MODE_USER |
+ CONTROL_CE_STOP_ACTIVE_CONTROL;
+
+ writel(ctl2, chip->ctl); /* stop user CE control */
+ writel(ctl, chip->ctl); /* default to fread or read mode */
+}
+
+static int aspeed_smc_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+
+ mutex_lock(&chip->controller->mutex);
+ return 0;
+}
+
+static void aspeed_smc_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+
+ mutex_unlock(&chip->controller->mutex);
+}
+
+static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+
+ aspeed_smc_start_user(nor);
+ aspeed_smc_write_to_ahb(chip->ahb_base, &opcode, 1);
+ aspeed_smc_read_from_ahb(buf, chip->ahb_base, len);
+ aspeed_smc_stop_user(nor);
+ return 0;
+}
+
+static int aspeed_smc_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ int len)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+
+ aspeed_smc_start_user(nor);
+ aspeed_smc_write_to_ahb(chip->ahb_base, &opcode, 1);
+ aspeed_smc_write_to_ahb(chip->ahb_base, buf, len);
+ aspeed_smc_stop_user(nor);
+ return 0;
+}
+
+static void aspeed_smc_send_cmd_addr(struct spi_nor *nor, u8 cmd, u32 addr)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+ __be32 temp;
+ u32 cmdaddr;
+
+ switch (nor->addr_width) {
+ default:
+ WARN_ONCE(1, "Unexpected address width %u, defaulting to 3\n",
+ nor->addr_width);
+ /* FALLTHROUGH */
+ case 3:
+ cmdaddr = addr & 0xFFFFFF;
+ cmdaddr |= cmd << 24;
+
+ temp = cpu_to_be32(cmdaddr);
+ aspeed_smc_write_to_ahb(chip->ahb_base, &temp, 4);
+ break;
+ case 4:
+ temp = cpu_to_be32(addr);
+ aspeed_smc_write_to_ahb(chip->ahb_base, &cmd, 1);
+ aspeed_smc_write_to_ahb(chip->ahb_base, &temp, 4);
+ break;
+ }
+}
+
+static ssize_t aspeed_smc_read_user(struct spi_nor *nor, loff_t from,
+ size_t len, u_char *read_buf)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+ int i;
+ u8 dummy = 0xFF;
+
+ aspeed_smc_start_user(nor);
+ aspeed_smc_send_cmd_addr(nor, nor->read_opcode, from);
+ for (i = 0; i < chip->nor.read_dummy / 8; i++)
+ aspeed_smc_write_to_ahb(chip->ahb_base, &dummy, sizeof(dummy));
+
+ aspeed_smc_read_from_ahb(read_buf, chip->ahb_base, len);
+ aspeed_smc_stop_user(nor);
+ return len;
+}
+
+static ssize_t aspeed_smc_write_user(struct spi_nor *nor, loff_t to,
+ size_t len, const u_char *write_buf)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+
+ aspeed_smc_start_user(nor);
+ aspeed_smc_send_cmd_addr(nor, nor->program_opcode, to);
+ aspeed_smc_write_to_ahb(chip->ahb_base, write_buf, len);
+ aspeed_smc_stop_user(nor);
+ return len;
+}
+
+static int aspeed_smc_unregister(struct aspeed_smc_controller *controller)
+{
+ struct aspeed_smc_chip *chip;
+ int n;
+
+ for (n = 0; n < controller->info->nce; n++) {
+ chip = controller->chips[n];
+ if (chip)
+ mtd_device_unregister(&chip->nor.mtd);
+ }
+
+ return 0;
+}
+
+static int aspeed_smc_remove(struct platform_device *dev)
+{
+ return aspeed_smc_unregister(platform_get_drvdata(dev));
+}
+
+static const struct of_device_id aspeed_smc_matches[] = {
+ { .compatible = "aspeed,ast2400-fmc", .data = &fmc_2400_info },
+ { .compatible = "aspeed,ast2400-spi", .data = &spi_2400_info },
+ { .compatible = "aspeed,ast2500-fmc", .data = &fmc_2500_info },
+ { .compatible = "aspeed,ast2500-spi", .data = &spi_2500_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, aspeed_smc_matches);
+
+/*
+ * Each chip has a mapping window defined by a segment address
+ * register defining a start and an end address on the AHB bus. These
+ * addresses can be configured to fit the chip size and offer a
+ * contiguous memory region across chips. For the moment, we only
+ * check that each chip segment is valid.
+ */
+static void __iomem *aspeed_smc_chip_base(struct aspeed_smc_chip *chip,
+ struct resource *res)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ u32 offset = 0;
+ u32 reg;
+
+ if (controller->info->nce > 1) {
+ reg = readl(SEGMENT_ADDR_REG(controller, chip->cs));
+
+ if (SEGMENT_ADDR_START(reg) >= SEGMENT_ADDR_END(reg))
+ return NULL;
+
+ offset = SEGMENT_ADDR_START(reg) - res->start;
+ }
+
+ return controller->ahb_base + offset;
+}
+
+static u32 aspeed_smc_ahb_base_phy(struct aspeed_smc_controller *controller)
+{
+ u32 seg0_val = readl(SEGMENT_ADDR_REG(controller, 0));
+
+ return SEGMENT_ADDR_START(seg0_val);
+}
+
+static u32 chip_set_segment(struct aspeed_smc_chip *chip, u32 cs, u32 start,
+ u32 size)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ void __iomem *seg_reg;
+ u32 seg_oldval, seg_newval, ahb_base_phy, end;
+
+ ahb_base_phy = aspeed_smc_ahb_base_phy(controller);
+
+ seg_reg = SEGMENT_ADDR_REG(controller, cs);
+ seg_oldval = readl(seg_reg);
+
+ /*
+ * If the chip size is not specified, use the default segment
+ * size, but take into account the possible overlap with the
+ * previous segment
+ */
+ if (!size)
+ size = SEGMENT_ADDR_END(seg_oldval) - start;
+
+ /*
+ * The segment cannot exceed the maximum window size of the
+ * controller.
+ */
+ if (start + size > ahb_base_phy + controller->ahb_window_size) {
+ size = ahb_base_phy + controller->ahb_window_size - start;
+ dev_warn(chip->nor.dev, "CE%d window resized to %dMB",
+ cs, size >> 20);
+ }
+
+ end = start + size;
+ seg_newval = SEGMENT_ADDR_VALUE(start, end);
+ writel(seg_newval, seg_reg);
+
+ /*
+ * Restore default value if something goes wrong. The chip
+ * might have set some bogus value and we would loose access
+ * to the chip.
+ */
+ if (seg_newval != readl(seg_reg)) {
+ dev_err(chip->nor.dev, "CE%d window invalid", cs);
+ writel(seg_oldval, seg_reg);
+ start = SEGMENT_ADDR_START(seg_oldval);
+ end = SEGMENT_ADDR_END(seg_oldval);
+ size = end - start;
+ }
+
+ dev_info(chip->nor.dev, "CE%d window [ 0x%.8x - 0x%.8x ] %dMB",
+ cs, start, end, size >> 20);
+
+ return size;
+}
+
+/*
+ * The segment register defines the mapping window on the AHB bus and
+ * it needs to be configured depending on the chip size. The segment
+ * register of the following CE also needs to be tuned in order to
+ * provide a contiguous window across multiple chips.
+ *
+ * This is expected to be called in increasing CE order
+ */
+static u32 aspeed_smc_chip_set_segment(struct aspeed_smc_chip *chip)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ u32 ahb_base_phy, start;
+ u32 size = chip->nor.mtd.size;
+
+ /*
+ * Each controller has a chip size limit for direct memory
+ * access
+ */
+ if (size > controller->info->maxsize)
+ size = controller->info->maxsize;
+
+ /*
+ * The AST2400 SPI controller only handles one chip and does
+ * not have segment registers. Let's use the chip size for the
+ * AHB window.
+ */
+ if (controller->info == &spi_2400_info)
+ goto out;
+
+ /*
+ * The AST2500 SPI controller has a HW bug when the CE0 chip
+ * size reaches 128MB. Enforce a size limit of 120MB to
+ * prevent the controller from using bogus settings in the
+ * segment register.
+ */
+ if (chip->cs == 0 && controller->info == &spi_2500_info &&
+ size == SZ_128M) {
+ size = 120 << 20;
+ dev_info(chip->nor.dev,
+ "CE%d window resized to %dMB (AST2500 HW quirk)",
+ chip->cs, size >> 20);
+ }
+
+ ahb_base_phy = aspeed_smc_ahb_base_phy(controller);
+
+ /*
+ * As a start address for the current segment, use the default
+ * start address if we are handling CE0 or use the previous
+ * segment ending address
+ */
+ if (chip->cs) {
+ u32 prev = readl(SEGMENT_ADDR_REG(controller, chip->cs - 1));
+
+ start = SEGMENT_ADDR_END(prev);
+ } else {
+ start = ahb_base_phy;
+ }
+
+ size = chip_set_segment(chip, chip->cs, start, size);
+
+ /* Update chip base address on the AHB bus */
+ chip->ahb_base = controller->ahb_base + (start - ahb_base_phy);
+
+ /*
+ * Now, make sure the next segment does not overlap with the
+ * current one we just configured, even if there is no
+ * available chip. That could break access in Command Mode.
+ */
+ if (chip->cs < controller->info->nce - 1)
+ chip_set_segment(chip, chip->cs + 1, start + size, 0);
+
+out:
+ if (size < chip->nor.mtd.size)
+ dev_warn(chip->nor.dev,
+ "CE%d window too small for chip %dMB",
+ chip->cs, (u32)chip->nor.mtd.size >> 20);
+
+ return size;
+}
+
+static void aspeed_smc_chip_enable_write(struct aspeed_smc_chip *chip)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ u32 reg;
+
+ reg = readl(controller->regs + CONFIG_REG);
+
+ reg |= aspeed_smc_chip_write_bit(chip);
+ writel(reg, controller->regs + CONFIG_REG);
+}
+
+static void aspeed_smc_chip_set_type(struct aspeed_smc_chip *chip, int type)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ u32 reg;
+
+ chip->type = type;
+
+ reg = readl(controller->regs + CONFIG_REG);
+ reg &= ~(3 << (chip->cs * 2));
+ reg |= chip->type << (chip->cs * 2);
+ writel(reg, controller->regs + CONFIG_REG);
+}
+
+/*
+ * The first chip of the AST2500 FMC flash controller is strapped by
+ * hardware, or autodetected, but other chips need to be set. Enforce
+ * the 4B setting for all chips.
+ */
+static void aspeed_smc_chip_set_4b(struct aspeed_smc_chip *chip)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ u32 reg;
+
+ reg = readl(controller->regs + CE_CONTROL_REG);
+ reg |= 1 << chip->cs;
+ writel(reg, controller->regs + CE_CONTROL_REG);
+}
+
+/*
+ * The AST2400 SPI flash controller does not have a CE Control
+ * register. It uses the CE0 control register to set 4Byte mode at the
+ * controller level.
+ */
+static void aspeed_smc_chip_set_4b_spi_2400(struct aspeed_smc_chip *chip)
+{
+ chip->ctl_val[smc_base] |= CONTROL_IO_ADDRESS_4B;
+ chip->ctl_val[smc_read] |= CONTROL_IO_ADDRESS_4B;
+}
+
+static int aspeed_smc_chip_setup_init(struct aspeed_smc_chip *chip,
+ struct resource *res)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ const struct aspeed_smc_info *info = controller->info;
+ u32 reg, base_reg;
+
+ /*
+ * Always turn on the write enable bit to allow opcodes to be
+ * sent in user mode.
+ */
+ aspeed_smc_chip_enable_write(chip);
+
+ /* The driver only supports SPI type flash */
+ if (info->hastype)
+ aspeed_smc_chip_set_type(chip, smc_type_spi);
+
+ /*
+ * Configure chip base address in memory
+ */
+ chip->ahb_base = aspeed_smc_chip_base(chip, res);
+ if (!chip->ahb_base) {
+ dev_warn(chip->nor.dev, "CE%d window closed", chip->cs);
+ return -EINVAL;
+ }
+
+ /*
+ * Get value of the inherited control register. U-Boot usually
+ * does some timing calibration on the FMC chip, so it's good
+ * to keep them. In the future, we should handle calibration
+ * from Linux.
+ */
+ reg = readl(chip->ctl);
+ dev_dbg(controller->dev, "control register: %08x\n", reg);
+
+ base_reg = reg & CONTROL_KEEP_MASK;
+ if (base_reg != reg) {
+ dev_dbg(controller->dev,
+ "control register changed to: %08x\n",
+ base_reg);
+ }
+ chip->ctl_val[smc_base] = base_reg;
+
+ /*
+ * Retain the prior value of the control register as the
+ * default if it was normal access mode. Otherwise start with
+ * the sanitized base value set to read mode.
+ */
+ if ((reg & CONTROL_COMMAND_MODE_MASK) ==
+ CONTROL_COMMAND_MODE_NORMAL)
+ chip->ctl_val[smc_read] = reg;
+ else
+ chip->ctl_val[smc_read] = chip->ctl_val[smc_base] |
+ CONTROL_COMMAND_MODE_NORMAL;
+
+ dev_dbg(controller->dev, "default control register: %08x\n",
+ chip->ctl_val[smc_read]);
+ return 0;
+}
+
+static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ const struct aspeed_smc_info *info = controller->info;
+ u32 cmd;
+
+ if (chip->nor.addr_width == 4 && info->set_4b)
+ info->set_4b(chip);
+
+ /* This is for direct AHB access when using Command Mode. */
+ chip->ahb_window_size = aspeed_smc_chip_set_segment(chip);
+
+ /*
+ * base mode has not been optimized yet. use it for writes.
+ */
+ chip->ctl_val[smc_write] = chip->ctl_val[smc_base] |
+ chip->nor.program_opcode << CONTROL_COMMAND_SHIFT |
+ CONTROL_COMMAND_MODE_WRITE;
+
+ dev_dbg(controller->dev, "write control register: %08x\n",
+ chip->ctl_val[smc_write]);
+
+ /*
+ * TODO: Adjust clocks if fast read is supported and interpret
+ * SPI-NOR flags to adjust controller settings.
+ */
+ if (chip->nor.read_proto == SNOR_PROTO_1_1_1) {
+ if (chip->nor.read_dummy == 0)
+ cmd = CONTROL_COMMAND_MODE_NORMAL;
+ else
+ cmd = CONTROL_COMMAND_MODE_FREAD;
+ } else {
+ dev_err(chip->nor.dev, "unsupported SPI read mode\n");
+ return -EINVAL;
+ }
+
+ chip->ctl_val[smc_read] |= cmd |
+ CONTROL_IO_DUMMY_SET(chip->nor.read_dummy / 8);
+
+ dev_dbg(controller->dev, "base control register: %08x\n",
+ chip->ctl_val[smc_read]);
+ return 0;
+}
+
+static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
+ struct device_node *np, struct resource *r)
+{
+ const struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_PP,
+ };
+ const struct aspeed_smc_info *info = controller->info;
+ struct device *dev = controller->dev;
+ struct device_node *child;
+ unsigned int cs;
+ int ret = -ENODEV;
+
+ for_each_available_child_of_node(np, child) {
+ struct aspeed_smc_chip *chip;
+ struct spi_nor *nor;
+ struct mtd_info *mtd;
+
+ /* This driver does not support NAND or NOR flash devices. */
+ if (!of_device_is_compatible(child, "jedec,spi-nor"))
+ continue;
+
+ ret = of_property_read_u32(child, "reg", &cs);
+ if (ret) {
+ dev_err(dev, "Couldn't not read chip select.\n");
+ break;
+ }
+
+ if (cs >= info->nce) {
+ dev_err(dev, "Chip select %d out of range.\n",
+ cs);
+ ret = -ERANGE;
+ break;
+ }
+
+ if (controller->chips[cs]) {
+ dev_err(dev, "Chip select %d already in use by %s\n",
+ cs, dev_name(controller->chips[cs]->nor.dev));
+ ret = -EBUSY;
+ break;
+ }
+
+ chip = devm_kzalloc(controller->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ chip->controller = controller;
+ chip->ctl = controller->regs + info->ctl0 + cs * 4;
+ chip->cs = cs;
+
+ nor = &chip->nor;
+ mtd = &nor->mtd;
+
+ nor->dev = dev;
+ nor->priv = chip;
+ spi_nor_set_flash_node(nor, child);
+ nor->read = aspeed_smc_read_user;
+ nor->write = aspeed_smc_write_user;
+ nor->read_reg = aspeed_smc_read_reg;
+ nor->write_reg = aspeed_smc_write_reg;
+ nor->prepare = aspeed_smc_prep;
+ nor->unprepare = aspeed_smc_unprep;
+
+ ret = aspeed_smc_chip_setup_init(chip, r);
+ if (ret)
+ break;
+
+ /*
+ * TODO: Add support for Dual and Quad SPI protocols
+ * attach when board support is present as determined
+ * by of property.
+ */
+ ret = spi_nor_scan(nor, NULL, &hwcaps);
+ if (ret)
+ break;
+
+ ret = aspeed_smc_chip_setup_finish(chip);
+ if (ret)
+ break;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ break;
+
+ controller->chips[cs] = chip;
+ }
+
+ if (ret)
+ aspeed_smc_unregister(controller);
+
+ return ret;
+}
+
+static int aspeed_smc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct aspeed_smc_controller *controller;
+ const struct of_device_id *match;
+ const struct aspeed_smc_info *info;
+ struct resource *res;
+ int ret;
+
+ match = of_match_device(aspeed_smc_matches, &pdev->dev);
+ if (!match || !match->data)
+ return -ENODEV;
+ info = match->data;
+
+ controller = devm_kzalloc(&pdev->dev,
+ struct_size(controller, chips, info->nce),
+ GFP_KERNEL);
+ if (!controller)
+ return -ENOMEM;
+ controller->info = info;
+ controller->dev = dev;
+
+ mutex_init(&controller->mutex);
+ platform_set_drvdata(pdev, controller);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ controller->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(controller->regs))
+ return PTR_ERR(controller->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ controller->ahb_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(controller->ahb_base))
+ return PTR_ERR(controller->ahb_base);
+
+ controller->ahb_window_size = resource_size(res);
+
+ ret = aspeed_smc_setup_flash(controller, np, res);
+ if (ret)
+ dev_err(dev, "Aspeed SMC probe failed %d\n", ret);
+
+ return ret;
+}
+
+static struct platform_driver aspeed_smc_driver = {
+ .probe = aspeed_smc_probe,
+ .remove = aspeed_smc_remove,
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = aspeed_smc_matches,
+ }
+};
+
+module_platform_driver(aspeed_smc_driver);
+
+MODULE_DESCRIPTION("ASPEED Static Memory Controller Driver");
+MODULE_AUTHOR("Cedric Le Goater <clg@kaod.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/atmel-quadspi.c b/drivers/mtd/spi-nor/atmel-quadspi.c
new file mode 100644
index 000000000..820048726
--- /dev/null
+++ b/drivers/mtd/spi-nor/atmel-quadspi.c
@@ -0,0 +1,781 @@
+/*
+ * Driver for Atmel QSPI Controller
+ *
+ * Copyright (C) 2015 Atmel Corporation
+ *
+ * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/platform_data/atmel.h>
+#include <linux/of.h>
+
+#include <linux/io.h>
+#include <linux/gpio/consumer.h>
+
+/* QSPI register offsets */
+#define QSPI_CR 0x0000 /* Control Register */
+#define QSPI_MR 0x0004 /* Mode Register */
+#define QSPI_RD 0x0008 /* Receive Data Register */
+#define QSPI_TD 0x000c /* Transmit Data Register */
+#define QSPI_SR 0x0010 /* Status Register */
+#define QSPI_IER 0x0014 /* Interrupt Enable Register */
+#define QSPI_IDR 0x0018 /* Interrupt Disable Register */
+#define QSPI_IMR 0x001c /* Interrupt Mask Register */
+#define QSPI_SCR 0x0020 /* Serial Clock Register */
+
+#define QSPI_IAR 0x0030 /* Instruction Address Register */
+#define QSPI_ICR 0x0034 /* Instruction Code Register */
+#define QSPI_IFR 0x0038 /* Instruction Frame Register */
+
+#define QSPI_SMR 0x0040 /* Scrambling Mode Register */
+#define QSPI_SKR 0x0044 /* Scrambling Key Register */
+
+#define QSPI_WPMR 0x00E4 /* Write Protection Mode Register */
+#define QSPI_WPSR 0x00E8 /* Write Protection Status Register */
+
+#define QSPI_VERSION 0x00FC /* Version Register */
+
+
+/* Bitfields in QSPI_CR (Control Register) */
+#define QSPI_CR_QSPIEN BIT(0)
+#define QSPI_CR_QSPIDIS BIT(1)
+#define QSPI_CR_SWRST BIT(7)
+#define QSPI_CR_LASTXFER BIT(24)
+
+/* Bitfields in QSPI_MR (Mode Register) */
+#define QSPI_MR_SSM BIT(0)
+#define QSPI_MR_LLB BIT(1)
+#define QSPI_MR_WDRBT BIT(2)
+#define QSPI_MR_SMRM BIT(3)
+#define QSPI_MR_CSMODE_MASK GENMASK(5, 4)
+#define QSPI_MR_CSMODE_NOT_RELOADED (0 << 4)
+#define QSPI_MR_CSMODE_LASTXFER (1 << 4)
+#define QSPI_MR_CSMODE_SYSTEMATICALLY (2 << 4)
+#define QSPI_MR_NBBITS_MASK GENMASK(11, 8)
+#define QSPI_MR_NBBITS(n) ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK)
+#define QSPI_MR_DLYBCT_MASK GENMASK(23, 16)
+#define QSPI_MR_DLYBCT(n) (((n) << 16) & QSPI_MR_DLYBCT_MASK)
+#define QSPI_MR_DLYCS_MASK GENMASK(31, 24)
+#define QSPI_MR_DLYCS(n) (((n) << 24) & QSPI_MR_DLYCS_MASK)
+
+/* Bitfields in QSPI_SR/QSPI_IER/QSPI_IDR/QSPI_IMR */
+#define QSPI_SR_RDRF BIT(0)
+#define QSPI_SR_TDRE BIT(1)
+#define QSPI_SR_TXEMPTY BIT(2)
+#define QSPI_SR_OVRES BIT(3)
+#define QSPI_SR_CSR BIT(8)
+#define QSPI_SR_CSS BIT(9)
+#define QSPI_SR_INSTRE BIT(10)
+#define QSPI_SR_QSPIENS BIT(24)
+
+#define QSPI_SR_CMD_COMPLETED (QSPI_SR_INSTRE | QSPI_SR_CSR)
+
+/* Bitfields in QSPI_SCR (Serial Clock Register) */
+#define QSPI_SCR_CPOL BIT(0)
+#define QSPI_SCR_CPHA BIT(1)
+#define QSPI_SCR_SCBR_MASK GENMASK(15, 8)
+#define QSPI_SCR_SCBR(n) (((n) << 8) & QSPI_SCR_SCBR_MASK)
+#define QSPI_SCR_DLYBS_MASK GENMASK(23, 16)
+#define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK)
+
+/* Bitfields in QSPI_ICR (Instruction Code Register) */
+#define QSPI_ICR_INST_MASK GENMASK(7, 0)
+#define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK)
+#define QSPI_ICR_OPT_MASK GENMASK(23, 16)
+#define QSPI_ICR_OPT(opt) (((opt) << 16) & QSPI_ICR_OPT_MASK)
+
+/* Bitfields in QSPI_IFR (Instruction Frame Register) */
+#define QSPI_IFR_WIDTH_MASK GENMASK(2, 0)
+#define QSPI_IFR_WIDTH_SINGLE_BIT_SPI (0 << 0)
+#define QSPI_IFR_WIDTH_DUAL_OUTPUT (1 << 0)
+#define QSPI_IFR_WIDTH_QUAD_OUTPUT (2 << 0)
+#define QSPI_IFR_WIDTH_DUAL_IO (3 << 0)
+#define QSPI_IFR_WIDTH_QUAD_IO (4 << 0)
+#define QSPI_IFR_WIDTH_DUAL_CMD (5 << 0)
+#define QSPI_IFR_WIDTH_QUAD_CMD (6 << 0)
+#define QSPI_IFR_INSTEN BIT(4)
+#define QSPI_IFR_ADDREN BIT(5)
+#define QSPI_IFR_OPTEN BIT(6)
+#define QSPI_IFR_DATAEN BIT(7)
+#define QSPI_IFR_OPTL_MASK GENMASK(9, 8)
+#define QSPI_IFR_OPTL_1BIT (0 << 8)
+#define QSPI_IFR_OPTL_2BIT (1 << 8)
+#define QSPI_IFR_OPTL_4BIT (2 << 8)
+#define QSPI_IFR_OPTL_8BIT (3 << 8)
+#define QSPI_IFR_ADDRL BIT(10)
+#define QSPI_IFR_TFRTYP_MASK GENMASK(13, 12)
+#define QSPI_IFR_TFRTYP_TRSFR_READ (0 << 12)
+#define QSPI_IFR_TFRTYP_TRSFR_READ_MEM (1 << 12)
+#define QSPI_IFR_TFRTYP_TRSFR_WRITE (2 << 12)
+#define QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM (3 << 13)
+#define QSPI_IFR_CRM BIT(14)
+#define QSPI_IFR_NBDUM_MASK GENMASK(20, 16)
+#define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK)
+
+/* Bitfields in QSPI_SMR (Scrambling Mode Register) */
+#define QSPI_SMR_SCREN BIT(0)
+#define QSPI_SMR_RVDIS BIT(1)
+
+/* Bitfields in QSPI_WPMR (Write Protection Mode Register) */
+#define QSPI_WPMR_WPEN BIT(0)
+#define QSPI_WPMR_WPKEY_MASK GENMASK(31, 8)
+#define QSPI_WPMR_WPKEY(wpkey) (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK)
+
+/* Bitfields in QSPI_WPSR (Write Protection Status Register) */
+#define QSPI_WPSR_WPVS BIT(0)
+#define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8)
+#define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC)
+
+
+struct atmel_qspi {
+ void __iomem *regs;
+ void __iomem *mem;
+ struct clk *clk;
+ struct platform_device *pdev;
+ u32 pending;
+
+ struct spi_nor nor;
+ u32 clk_rate;
+ struct completion cmd_completion;
+};
+
+struct atmel_qspi_command {
+ union {
+ struct {
+ u32 instruction:1;
+ u32 address:3;
+ u32 mode:1;
+ u32 dummy:1;
+ u32 data:1;
+ u32 reserved:25;
+ } bits;
+ u32 word;
+ } enable;
+ u8 instruction;
+ u8 mode;
+ u8 num_mode_cycles;
+ u8 num_dummy_cycles;
+ u32 address;
+
+ size_t buf_len;
+ const void *tx_buf;
+ void *rx_buf;
+};
+
+/* Register access functions */
+static inline u32 qspi_readl(struct atmel_qspi *aq, u32 reg)
+{
+ return readl_relaxed(aq->regs + reg);
+}
+
+static inline void qspi_writel(struct atmel_qspi *aq, u32 reg, u32 value)
+{
+ writel_relaxed(value, aq->regs + reg);
+}
+
+static int atmel_qspi_run_transfer(struct atmel_qspi *aq,
+ const struct atmel_qspi_command *cmd)
+{
+ void __iomem *ahb_mem;
+
+ /* Then fallback to a PIO transfer (memcpy() DOES NOT work!) */
+ ahb_mem = aq->mem;
+ if (cmd->enable.bits.address)
+ ahb_mem += cmd->address;
+ if (cmd->tx_buf)
+ _memcpy_toio(ahb_mem, cmd->tx_buf, cmd->buf_len);
+ else
+ _memcpy_fromio(cmd->rx_buf, ahb_mem, cmd->buf_len);
+
+ return 0;
+}
+
+#ifdef DEBUG
+static void atmel_qspi_debug_command(struct atmel_qspi *aq,
+ const struct atmel_qspi_command *cmd,
+ u32 ifr)
+{
+ u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
+ size_t len = 0;
+ int i;
+
+ if (cmd->enable.bits.instruction)
+ cmd_buf[len++] = cmd->instruction;
+
+ for (i = cmd->enable.bits.address-1; i >= 0; --i)
+ cmd_buf[len++] = (cmd->address >> (i << 3)) & 0xff;
+
+ if (cmd->enable.bits.mode)
+ cmd_buf[len++] = cmd->mode;
+
+ if (cmd->enable.bits.dummy) {
+ int num = cmd->num_dummy_cycles;
+
+ switch (ifr & QSPI_IFR_WIDTH_MASK) {
+ case QSPI_IFR_WIDTH_SINGLE_BIT_SPI:
+ case QSPI_IFR_WIDTH_DUAL_OUTPUT:
+ case QSPI_IFR_WIDTH_QUAD_OUTPUT:
+ num >>= 3;
+ break;
+ case QSPI_IFR_WIDTH_DUAL_IO:
+ case QSPI_IFR_WIDTH_DUAL_CMD:
+ num >>= 2;
+ break;
+ case QSPI_IFR_WIDTH_QUAD_IO:
+ case QSPI_IFR_WIDTH_QUAD_CMD:
+ num >>= 1;
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < num; ++i)
+ cmd_buf[len++] = 0;
+ }
+
+ /* Dump the SPI command */
+ print_hex_dump(KERN_DEBUG, "qspi cmd: ", DUMP_PREFIX_NONE,
+ 32, 1, cmd_buf, len, false);
+
+#ifdef VERBOSE_DEBUG
+ /* If verbose debug is enabled, also dump the TX data */
+ if (cmd->enable.bits.data && cmd->tx_buf)
+ print_hex_dump(KERN_DEBUG, "qspi tx : ", DUMP_PREFIX_NONE,
+ 32, 1, cmd->tx_buf, cmd->buf_len, false);
+#endif
+}
+#else
+#define atmel_qspi_debug_command(aq, cmd, ifr)
+#endif
+
+static int atmel_qspi_run_command(struct atmel_qspi *aq,
+ const struct atmel_qspi_command *cmd,
+ u32 ifr_tfrtyp, enum spi_nor_protocol proto)
+{
+ u32 iar, icr, ifr, sr;
+ int err = 0;
+
+ iar = 0;
+ icr = 0;
+ ifr = ifr_tfrtyp;
+
+ /* Set the SPI protocol */
+ switch (proto) {
+ case SNOR_PROTO_1_1_1:
+ ifr |= QSPI_IFR_WIDTH_SINGLE_BIT_SPI;
+ break;
+
+ case SNOR_PROTO_1_1_2:
+ ifr |= QSPI_IFR_WIDTH_DUAL_OUTPUT;
+ break;
+
+ case SNOR_PROTO_1_1_4:
+ ifr |= QSPI_IFR_WIDTH_QUAD_OUTPUT;
+ break;
+
+ case SNOR_PROTO_1_2_2:
+ ifr |= QSPI_IFR_WIDTH_DUAL_IO;
+ break;
+
+ case SNOR_PROTO_1_4_4:
+ ifr |= QSPI_IFR_WIDTH_QUAD_IO;
+ break;
+
+ case SNOR_PROTO_2_2_2:
+ ifr |= QSPI_IFR_WIDTH_DUAL_CMD;
+ break;
+
+ case SNOR_PROTO_4_4_4:
+ ifr |= QSPI_IFR_WIDTH_QUAD_CMD;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Compute instruction parameters */
+ if (cmd->enable.bits.instruction) {
+ icr |= QSPI_ICR_INST(cmd->instruction);
+ ifr |= QSPI_IFR_INSTEN;
+ }
+
+ /* Compute address parameters */
+ switch (cmd->enable.bits.address) {
+ case 4:
+ ifr |= QSPI_IFR_ADDRL;
+ /* fall through to the 24bit (3 byte) address case. */
+ case 3:
+ iar = (cmd->enable.bits.data) ? 0 : cmd->address;
+ ifr |= QSPI_IFR_ADDREN;
+ break;
+ case 0:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Compute option parameters */
+ if (cmd->enable.bits.mode && cmd->num_mode_cycles) {
+ u32 mode_cycle_bits, mode_bits;
+
+ icr |= QSPI_ICR_OPT(cmd->mode);
+ ifr |= QSPI_IFR_OPTEN;
+
+ switch (ifr & QSPI_IFR_WIDTH_MASK) {
+ case QSPI_IFR_WIDTH_SINGLE_BIT_SPI:
+ case QSPI_IFR_WIDTH_DUAL_OUTPUT:
+ case QSPI_IFR_WIDTH_QUAD_OUTPUT:
+ mode_cycle_bits = 1;
+ break;
+ case QSPI_IFR_WIDTH_DUAL_IO:
+ case QSPI_IFR_WIDTH_DUAL_CMD:
+ mode_cycle_bits = 2;
+ break;
+ case QSPI_IFR_WIDTH_QUAD_IO:
+ case QSPI_IFR_WIDTH_QUAD_CMD:
+ mode_cycle_bits = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mode_bits = cmd->num_mode_cycles * mode_cycle_bits;
+ switch (mode_bits) {
+ case 1:
+ ifr |= QSPI_IFR_OPTL_1BIT;
+ break;
+
+ case 2:
+ ifr |= QSPI_IFR_OPTL_2BIT;
+ break;
+
+ case 4:
+ ifr |= QSPI_IFR_OPTL_4BIT;
+ break;
+
+ case 8:
+ ifr |= QSPI_IFR_OPTL_8BIT;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Set number of dummy cycles */
+ if (cmd->enable.bits.dummy)
+ ifr |= QSPI_IFR_NBDUM(cmd->num_dummy_cycles);
+
+ /* Set data enable */
+ if (cmd->enable.bits.data) {
+ ifr |= QSPI_IFR_DATAEN;
+
+ /* Special case for Continuous Read Mode */
+ if (!cmd->tx_buf && !cmd->rx_buf)
+ ifr |= QSPI_IFR_CRM;
+ }
+
+ /* Clear pending interrupts */
+ (void)qspi_readl(aq, QSPI_SR);
+
+ /* Set QSPI Instruction Frame registers */
+ atmel_qspi_debug_command(aq, cmd, ifr);
+ qspi_writel(aq, QSPI_IAR, iar);
+ qspi_writel(aq, QSPI_ICR, icr);
+ qspi_writel(aq, QSPI_IFR, ifr);
+
+ /* Skip to the final steps if there is no data */
+ if (!cmd->enable.bits.data)
+ goto no_data;
+
+ /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
+ (void)qspi_readl(aq, QSPI_IFR);
+
+ /* Stop here for continuous read */
+ if (!cmd->tx_buf && !cmd->rx_buf)
+ return 0;
+ /* Send/Receive data */
+ err = atmel_qspi_run_transfer(aq, cmd);
+
+ /* Release the chip-select */
+ qspi_writel(aq, QSPI_CR, QSPI_CR_LASTXFER);
+
+ if (err)
+ return err;
+
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+ /*
+ * If verbose debug is enabled, also dump the RX data in addition to
+ * the SPI command previously dumped by atmel_qspi_debug_command()
+ */
+ if (cmd->rx_buf)
+ print_hex_dump(KERN_DEBUG, "qspi rx : ", DUMP_PREFIX_NONE,
+ 32, 1, cmd->rx_buf, cmd->buf_len, false);
+#endif
+no_data:
+ /* Poll INSTRuction End status */
+ sr = qspi_readl(aq, QSPI_SR);
+ if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
+ return err;
+
+ /* Wait for INSTRuction End interrupt */
+ reinit_completion(&aq->cmd_completion);
+ aq->pending = sr & QSPI_SR_CMD_COMPLETED;
+ qspi_writel(aq, QSPI_IER, QSPI_SR_CMD_COMPLETED);
+ if (!wait_for_completion_timeout(&aq->cmd_completion,
+ msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+ qspi_writel(aq, QSPI_IDR, QSPI_SR_CMD_COMPLETED);
+
+ return err;
+}
+
+static int atmel_qspi_read_reg(struct spi_nor *nor, u8 opcode,
+ u8 *buf, int len)
+{
+ struct atmel_qspi *aq = nor->priv;
+ struct atmel_qspi_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.enable.bits.instruction = 1;
+ cmd.enable.bits.data = 1;
+ cmd.instruction = opcode;
+ cmd.rx_buf = buf;
+ cmd.buf_len = len;
+ return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_READ,
+ nor->reg_proto);
+}
+
+static int atmel_qspi_write_reg(struct spi_nor *nor, u8 opcode,
+ u8 *buf, int len)
+{
+ struct atmel_qspi *aq = nor->priv;
+ struct atmel_qspi_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.enable.bits.instruction = 1;
+ cmd.enable.bits.data = (buf != NULL && len > 0);
+ cmd.instruction = opcode;
+ cmd.tx_buf = buf;
+ cmd.buf_len = len;
+ return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE,
+ nor->reg_proto);
+}
+
+static ssize_t atmel_qspi_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *write_buf)
+{
+ struct atmel_qspi *aq = nor->priv;
+ struct atmel_qspi_command cmd;
+ ssize_t ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.enable.bits.instruction = 1;
+ cmd.enable.bits.address = nor->addr_width;
+ cmd.enable.bits.data = 1;
+ cmd.instruction = nor->program_opcode;
+ cmd.address = (u32)to;
+ cmd.tx_buf = write_buf;
+ cmd.buf_len = len;
+ ret = atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM,
+ nor->write_proto);
+ return (ret < 0) ? ret : len;
+}
+
+static int atmel_qspi_erase(struct spi_nor *nor, loff_t offs)
+{
+ struct atmel_qspi *aq = nor->priv;
+ struct atmel_qspi_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.enable.bits.instruction = 1;
+ cmd.enable.bits.address = nor->addr_width;
+ cmd.instruction = nor->erase_opcode;
+ cmd.address = (u32)offs;
+ return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE,
+ nor->reg_proto);
+}
+
+static ssize_t atmel_qspi_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *read_buf)
+{
+ struct atmel_qspi *aq = nor->priv;
+ struct atmel_qspi_command cmd;
+ u8 num_mode_cycles, num_dummy_cycles;
+ ssize_t ret;
+
+ if (nor->read_dummy >= 2) {
+ num_mode_cycles = 2;
+ num_dummy_cycles = nor->read_dummy - 2;
+ } else {
+ num_mode_cycles = nor->read_dummy;
+ num_dummy_cycles = 0;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.enable.bits.instruction = 1;
+ cmd.enable.bits.address = nor->addr_width;
+ cmd.enable.bits.mode = (num_mode_cycles > 0);
+ cmd.enable.bits.dummy = (num_dummy_cycles > 0);
+ cmd.enable.bits.data = 1;
+ cmd.instruction = nor->read_opcode;
+ cmd.address = (u32)from;
+ cmd.mode = 0xff; /* This value prevents from entering the 0-4-4 mode */
+ cmd.num_mode_cycles = num_mode_cycles;
+ cmd.num_dummy_cycles = num_dummy_cycles;
+ cmd.rx_buf = read_buf;
+ cmd.buf_len = len;
+ ret = atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_READ_MEM,
+ nor->read_proto);
+ return (ret < 0) ? ret : len;
+}
+
+static int atmel_qspi_init(struct atmel_qspi *aq)
+{
+ unsigned long src_rate;
+ u32 mr, scr, scbr;
+
+ /* Reset the QSPI controller */
+ qspi_writel(aq, QSPI_CR, QSPI_CR_SWRST);
+
+ /* Set the QSPI controller in Serial Memory Mode */
+ mr = QSPI_MR_NBBITS(8) | QSPI_MR_SSM;
+ qspi_writel(aq, QSPI_MR, mr);
+
+ src_rate = clk_get_rate(aq->clk);
+ if (!src_rate)
+ return -EINVAL;
+
+ /* Compute the QSPI baudrate */
+ scbr = DIV_ROUND_UP(src_rate, aq->clk_rate);
+ if (scbr > 0)
+ scbr--;
+ scr = QSPI_SCR_SCBR(scbr);
+ qspi_writel(aq, QSPI_SCR, scr);
+
+ /* Enable the QSPI controller */
+ qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIEN);
+
+ return 0;
+}
+
+static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
+{
+ struct atmel_qspi *aq = (struct atmel_qspi *)dev_id;
+ u32 status, mask, pending;
+
+ status = qspi_readl(aq, QSPI_SR);
+ mask = qspi_readl(aq, QSPI_IMR);
+ pending = status & mask;
+
+ if (!pending)
+ return IRQ_NONE;
+
+ aq->pending |= pending;
+ if ((aq->pending & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
+ complete(&aq->cmd_completion);
+
+ return IRQ_HANDLED;
+}
+
+static int atmel_qspi_probe(struct platform_device *pdev)
+{
+ const struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_READ_1_1_2 |
+ SNOR_HWCAPS_READ_1_2_2 |
+ SNOR_HWCAPS_READ_2_2_2 |
+ SNOR_HWCAPS_READ_1_1_4 |
+ SNOR_HWCAPS_READ_1_4_4 |
+ SNOR_HWCAPS_READ_4_4_4 |
+ SNOR_HWCAPS_PP |
+ SNOR_HWCAPS_PP_1_1_4 |
+ SNOR_HWCAPS_PP_1_4_4 |
+ SNOR_HWCAPS_PP_4_4_4,
+ };
+ struct device_node *child, *np = pdev->dev.of_node;
+ struct atmel_qspi *aq;
+ struct resource *res;
+ struct spi_nor *nor;
+ struct mtd_info *mtd;
+ int irq, err = 0;
+
+ if (of_get_child_count(np) != 1)
+ return -ENODEV;
+ child = of_get_next_child(np, NULL);
+
+ aq = devm_kzalloc(&pdev->dev, sizeof(*aq), GFP_KERNEL);
+ if (!aq) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ platform_set_drvdata(pdev, aq);
+ init_completion(&aq->cmd_completion);
+ aq->pdev = pdev;
+
+ /* Map the registers */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
+ aq->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(aq->regs)) {
+ dev_err(&pdev->dev, "missing registers\n");
+ err = PTR_ERR(aq->regs);
+ goto exit;
+ }
+
+ /* Map the AHB memory */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mmap");
+ aq->mem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(aq->mem)) {
+ dev_err(&pdev->dev, "missing AHB memory\n");
+ err = PTR_ERR(aq->mem);
+ goto exit;
+ }
+
+ /* Get the peripheral clock */
+ aq->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(aq->clk)) {
+ dev_err(&pdev->dev, "missing peripheral clock\n");
+ err = PTR_ERR(aq->clk);
+ goto exit;
+ }
+
+ /* Enable the peripheral clock */
+ err = clk_prepare_enable(aq->clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable the peripheral clock\n");
+ goto exit;
+ }
+
+ /* Request the IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "missing IRQ\n");
+ err = irq;
+ goto disable_clk;
+ }
+ err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
+ 0, dev_name(&pdev->dev), aq);
+ if (err)
+ goto disable_clk;
+
+ /* Setup the spi-nor */
+ nor = &aq->nor;
+ mtd = &nor->mtd;
+
+ nor->dev = &pdev->dev;
+ spi_nor_set_flash_node(nor, child);
+ nor->priv = aq;
+ mtd->priv = nor;
+
+ nor->read_reg = atmel_qspi_read_reg;
+ nor->write_reg = atmel_qspi_write_reg;
+ nor->read = atmel_qspi_read;
+ nor->write = atmel_qspi_write;
+ nor->erase = atmel_qspi_erase;
+
+ err = of_property_read_u32(child, "spi-max-frequency", &aq->clk_rate);
+ if (err < 0)
+ goto disable_clk;
+
+ err = atmel_qspi_init(aq);
+ if (err)
+ goto disable_clk;
+
+ err = spi_nor_scan(nor, NULL, &hwcaps);
+ if (err)
+ goto disable_clk;
+
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ goto disable_clk;
+
+ of_node_put(child);
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(aq->clk);
+exit:
+ of_node_put(child);
+
+ return err;
+}
+
+static int atmel_qspi_remove(struct platform_device *pdev)
+{
+ struct atmel_qspi *aq = platform_get_drvdata(pdev);
+
+ mtd_device_unregister(&aq->nor.mtd);
+ qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIDIS);
+ clk_disable_unprepare(aq->clk);
+ return 0;
+}
+
+static int __maybe_unused atmel_qspi_suspend(struct device *dev)
+{
+ struct atmel_qspi *aq = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(aq->clk);
+
+ return 0;
+}
+
+static int __maybe_unused atmel_qspi_resume(struct device *dev)
+{
+ struct atmel_qspi *aq = dev_get_drvdata(dev);
+
+ clk_prepare_enable(aq->clk);
+
+ return atmel_qspi_init(aq);
+}
+
+static SIMPLE_DEV_PM_OPS(atmel_qspi_pm_ops, atmel_qspi_suspend,
+ atmel_qspi_resume);
+
+static const struct of_device_id atmel_qspi_dt_ids[] = {
+ { .compatible = "atmel,sama5d2-qspi" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_qspi_dt_ids);
+
+static struct platform_driver atmel_qspi_driver = {
+ .driver = {
+ .name = "atmel_qspi",
+ .of_match_table = atmel_qspi_dt_ids,
+ .pm = &atmel_qspi_pm_ops,
+ },
+ .probe = atmel_qspi_probe,
+ .remove = atmel_qspi_remove,
+};
+module_platform_driver(atmel_qspi_driver);
+
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@atmel.com>");
+MODULE_DESCRIPTION("Atmel QSPI Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
new file mode 100644
index 000000000..a92f531ad
--- /dev/null
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -0,0 +1,1493 @@
+/*
+ * Driver for Cadence QSPI Controller
+ *
+ * Copyright Altera Corporation (C) 2012-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+#include <linux/spi/spi.h>
+#include <linux/timer.h>
+
+#define CQSPI_NAME "cadence-qspi"
+#define CQSPI_MAX_CHIPSELECT 16
+
+/* Quirks */
+#define CQSPI_NEEDS_WR_DELAY BIT(0)
+
+struct cqspi_st;
+
+struct cqspi_flash_pdata {
+ struct spi_nor nor;
+ struct cqspi_st *cqspi;
+ u32 clk_rate;
+ u32 read_delay;
+ u32 tshsl_ns;
+ u32 tsd2d_ns;
+ u32 tchsh_ns;
+ u32 tslch_ns;
+ u8 inst_width;
+ u8 addr_width;
+ u8 data_width;
+ u8 cs;
+ bool registered;
+ bool use_direct_mode;
+};
+
+struct cqspi_st {
+ struct platform_device *pdev;
+
+ struct clk *clk;
+ unsigned int sclk;
+
+ void __iomem *iobase;
+ void __iomem *ahb_base;
+ resource_size_t ahb_size;
+ struct completion transfer_complete;
+ struct mutex bus_mutex;
+
+ struct dma_chan *rx_chan;
+ struct completion rx_dma_complete;
+ dma_addr_t mmap_phys_base;
+
+ int current_cs;
+ int current_page_size;
+ int current_erase_size;
+ int current_addr_width;
+ unsigned long master_ref_clk_hz;
+ bool is_decoded_cs;
+ u32 fifo_depth;
+ u32 fifo_width;
+ bool rclk_en;
+ u32 trigger_address;
+ u32 wr_delay;
+ struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
+};
+
+/* Operation timeout value */
+#define CQSPI_TIMEOUT_MS 500
+#define CQSPI_READ_TIMEOUT_MS 10
+
+/* Instruction type */
+#define CQSPI_INST_TYPE_SINGLE 0
+#define CQSPI_INST_TYPE_DUAL 1
+#define CQSPI_INST_TYPE_QUAD 2
+
+#define CQSPI_DUMMY_CLKS_PER_BYTE 8
+#define CQSPI_DUMMY_BYTES_MAX 4
+#define CQSPI_DUMMY_CLKS_MAX 31
+
+#define CQSPI_STIG_DATA_LEN_MAX 8
+
+/* Register map */
+#define CQSPI_REG_CONFIG 0x00
+#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
+#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL BIT(7)
+#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
+#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
+#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
+#define CQSPI_REG_CONFIG_BAUD_LSB 19
+#define CQSPI_REG_CONFIG_IDLE_LSB 31
+#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
+#define CQSPI_REG_CONFIG_BAUD_MASK 0xF
+
+#define CQSPI_REG_RD_INSTR 0x04
+#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0
+#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8
+#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12
+#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16
+#define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20
+#define CQSPI_REG_RD_INSTR_DUMMY_LSB 24
+#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3
+#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3
+#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3
+#define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F
+
+#define CQSPI_REG_WR_INSTR 0x08
+#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0
+#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12
+#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16
+
+#define CQSPI_REG_DELAY 0x0C
+#define CQSPI_REG_DELAY_TSLCH_LSB 0
+#define CQSPI_REG_DELAY_TCHSH_LSB 8
+#define CQSPI_REG_DELAY_TSD2D_LSB 16
+#define CQSPI_REG_DELAY_TSHSL_LSB 24
+#define CQSPI_REG_DELAY_TSLCH_MASK 0xFF
+#define CQSPI_REG_DELAY_TCHSH_MASK 0xFF
+#define CQSPI_REG_DELAY_TSD2D_MASK 0xFF
+#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF
+
+#define CQSPI_REG_READCAPTURE 0x10
+#define CQSPI_REG_READCAPTURE_BYPASS_LSB 0
+#define CQSPI_REG_READCAPTURE_DELAY_LSB 1
+#define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF
+
+#define CQSPI_REG_SIZE 0x14
+#define CQSPI_REG_SIZE_ADDRESS_LSB 0
+#define CQSPI_REG_SIZE_PAGE_LSB 4
+#define CQSPI_REG_SIZE_BLOCK_LSB 16
+#define CQSPI_REG_SIZE_ADDRESS_MASK 0xF
+#define CQSPI_REG_SIZE_PAGE_MASK 0xFFF
+#define CQSPI_REG_SIZE_BLOCK_MASK 0x3F
+
+#define CQSPI_REG_SRAMPARTITION 0x18
+#define CQSPI_REG_INDIRECTTRIGGER 0x1C
+
+#define CQSPI_REG_DMA 0x20
+#define CQSPI_REG_DMA_SINGLE_LSB 0
+#define CQSPI_REG_DMA_BURST_LSB 8
+#define CQSPI_REG_DMA_SINGLE_MASK 0xFF
+#define CQSPI_REG_DMA_BURST_MASK 0xFF
+
+#define CQSPI_REG_REMAP 0x24
+#define CQSPI_REG_MODE_BIT 0x28
+
+#define CQSPI_REG_SDRAMLEVEL 0x2C
+#define CQSPI_REG_SDRAMLEVEL_RD_LSB 0
+#define CQSPI_REG_SDRAMLEVEL_WR_LSB 16
+#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF
+#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF
+
+#define CQSPI_REG_IRQSTATUS 0x40
+#define CQSPI_REG_IRQMASK 0x44
+
+#define CQSPI_REG_INDIRECTRD 0x60
+#define CQSPI_REG_INDIRECTRD_START_MASK BIT(0)
+#define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1)
+#define CQSPI_REG_INDIRECTRD_DONE_MASK BIT(5)
+
+#define CQSPI_REG_INDIRECTRDWATERMARK 0x64
+#define CQSPI_REG_INDIRECTRDSTARTADDR 0x68
+#define CQSPI_REG_INDIRECTRDBYTES 0x6C
+
+#define CQSPI_REG_CMDCTRL 0x90
+#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0)
+#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1)
+#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12
+#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15
+#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16
+#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19
+#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20
+#define CQSPI_REG_CMDCTRL_RD_EN_LSB 23
+#define CQSPI_REG_CMDCTRL_OPCODE_LSB 24
+#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7
+#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3
+#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7
+
+#define CQSPI_REG_INDIRECTWR 0x70
+#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0)
+#define CQSPI_REG_INDIRECTWR_CANCEL_MASK BIT(1)
+#define CQSPI_REG_INDIRECTWR_DONE_MASK BIT(5)
+
+#define CQSPI_REG_INDIRECTWRWATERMARK 0x74
+#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78
+#define CQSPI_REG_INDIRECTWRBYTES 0x7C
+
+#define CQSPI_REG_CMDADDRESS 0x94
+#define CQSPI_REG_CMDREADDATALOWER 0xA0
+#define CQSPI_REG_CMDREADDATAUPPER 0xA4
+#define CQSPI_REG_CMDWRITEDATALOWER 0xA8
+#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC
+
+/* Interrupt status bits */
+#define CQSPI_REG_IRQ_MODE_ERR BIT(0)
+#define CQSPI_REG_IRQ_UNDERFLOW BIT(1)
+#define CQSPI_REG_IRQ_IND_COMP BIT(2)
+#define CQSPI_REG_IRQ_IND_RD_REJECT BIT(3)
+#define CQSPI_REG_IRQ_WR_PROTECTED_ERR BIT(4)
+#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR BIT(5)
+#define CQSPI_REG_IRQ_WATERMARK BIT(6)
+#define CQSPI_REG_IRQ_IND_SRAM_FULL BIT(12)
+
+#define CQSPI_IRQ_MASK_RD (CQSPI_REG_IRQ_WATERMARK | \
+ CQSPI_REG_IRQ_IND_SRAM_FULL | \
+ CQSPI_REG_IRQ_IND_COMP)
+
+#define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \
+ CQSPI_REG_IRQ_WATERMARK | \
+ CQSPI_REG_IRQ_UNDERFLOW)
+
+#define CQSPI_IRQ_STATUS_MASK 0x1FFFF
+
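+/*
+ * Illustrative note: poll @reg until all bits in @mask are set, or, when
+ * @clear is true, until they are all cleared, giving up after
+ * CQSPI_TIMEOUT_MS. cqspi_exec_flash_cmd() below, for instance, uses this
+ * to wait for CQSPI_REG_CMDCTRL_INPROGRESS_MASK to drop.
+ */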
+static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clear)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
+ u32 val;
+
+ while (1) {
+ val = readl(reg);
+ if (clear)
+ val = ~val;
+ val &= mask;
+
+ if (val == mask)
+ return 0;
+
+ if (time_after(jiffies, end))
+ return -ETIMEDOUT;
+ }
+}
+
+static bool cqspi_is_idle(struct cqspi_st *cqspi)
+{
+ u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+
+ return reg & (1 << CQSPI_REG_CONFIG_IDLE_LSB);
+}
+
+static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
+{
+ u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);
+
+ reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
+ return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
+}
+
+static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
+{
+ struct cqspi_st *cqspi = dev;
+ unsigned int irq_status;
+
+ /* Read interrupt status */
+ irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);
+
+ /* Clear interrupt */
+ writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
+
+ irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+
+ if (irq_status)
+ complete(&cqspi->transfer_complete);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ u32 rdreg = 0;
+
+ rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
+ rdreg |= f_pdata->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
+ rdreg |= f_pdata->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
+
+ return rdreg;
+}
+
+static int cqspi_wait_idle(struct cqspi_st *cqspi)
+{
+ const unsigned int poll_idle_retry = 3;
+ unsigned int count = 0;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
+ while (1) {
+ /*
+ * Read the bit a few times in succession to ensure the
+ * controller is indeed idle, that is, that the bit does not
+ * transition low again.
+ */
+ if (cqspi_is_idle(cqspi))
+ count++;
+ else
+ count = 0;
+
+ if (count >= poll_idle_retry)
+ return 0;
+
+ if (time_after(jiffies, timeout)) {
+ /* Timeout, in busy mode. */
+ dev_err(&cqspi->pdev->dev,
+ "QSPI is still busy after %dms timeout.\n",
+ CQSPI_TIMEOUT_MS);
+ return -ETIMEDOUT;
+ }
+
+ cpu_relax();
+ }
+}
+
+static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
+{
+ void __iomem *reg_base = cqspi->iobase;
+ int ret;
+
+ /* Write the CMDCTRL register without starting execution. */
+ writel(reg, reg_base + CQSPI_REG_CMDCTRL);
+ /* Start execution. */
+ reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
+ writel(reg, reg_base + CQSPI_REG_CMDCTRL);
+
+ /* Polling for completion. */
+ ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
+ CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
+ if (ret) {
+ dev_err(&cqspi->pdev->dev,
+ "Flash command execution timed out.\n");
+ return ret;
+ }
+
+ /* Polling QSPI idle status. */
+ return cqspi_wait_idle(cqspi);
+}
+
+static int cqspi_command_read(struct spi_nor *nor,
+ const u8 *txbuf, const unsigned n_tx,
+ u8 *rxbuf, const unsigned n_rx)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int rdreg;
+ unsigned int reg;
+ unsigned int read_len;
+ int status;
+
+ if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
+ dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n",
+ n_rx, rxbuf);
+ return -EINVAL;
+ }
+
+ reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+
+ rdreg = cqspi_calc_rdreg(nor, txbuf[0]);
+ writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
+
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
+
+ /* 0 means 1 byte. */
+ reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
+ status = cqspi_exec_flash_cmd(cqspi, reg);
+ if (status)
+ return status;
+
+ reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);
+
+ /* Put the read value into rx_buf */
+ read_len = (n_rx > 4) ? 4 : n_rx;
+ memcpy(rxbuf, &reg, read_len);
+ rxbuf += read_len;
+
+ if (n_rx > 4) {
+ reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);
+
+ read_len = n_rx - read_len;
+ memcpy(rxbuf, &reg, read_len);
+ }
+
+ return 0;
+}
+
+static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
+ const u8 *txbuf, const unsigned n_tx)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+ unsigned int data;
+ int ret;
+
+ if (n_tx > 4 || (n_tx && !txbuf)) {
+ dev_err(nor->dev,
+ "Invalid input argument, cmdlen %d txbuf 0x%p\n",
+ n_tx, txbuf);
+ return -EINVAL;
+ }
+
+ reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+ if (n_tx) {
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
+ reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
+ data = 0;
+ memcpy(&data, txbuf, n_tx);
+ writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);
+ }
+
+ ret = cqspi_exec_flash_cmd(cqspi, reg);
+ return ret;
+}
+
+static int cqspi_command_write_addr(struct spi_nor *nor,
+ const u8 opcode, const unsigned int addr)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+
+ reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
+ reg |= ((nor->addr_width - 1) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
+
+ writel(addr, reg_base + CQSPI_REG_CMDADDRESS);
+
+ return cqspi_exec_flash_cmd(cqspi, reg);
+}
+
+static int cqspi_read_setup(struct spi_nor *nor)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int dummy_clk = 0;
+ unsigned int reg;
+
+ reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
+ reg |= cqspi_calc_rdreg(nor, nor->read_opcode);
+
+ /* Setup dummy clock cycles */
+ dummy_clk = nor->read_dummy;
+ if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
+ return -EOPNOTSUPP;
+
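+ /*
+ * Illustrative note: when at least one full byte (8 clocks) of dummy
+ * cycles is requested, the mode-bit phase is enabled and, for
+ * non-quad instructions, accounts for 8 of those clocks. E.g. with
+ * nor->read_dummy == 8 and a single-wide instruction, the mode byte
+ * supplies all 8 clocks and the DUMMY field stays 0.
+ */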
+ if (dummy_clk / 8) {
+ reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
+ /* Set mode bits high to ensure chip doesn't enter XIP */
+ writel(0xFF, reg_base + CQSPI_REG_MODE_BIT);
+
+ /* Need to subtract the mode byte (8 clocks). */
+ if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD)
+ dummy_clk -= 8;
+
+ if (dummy_clk)
+ reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
+ << CQSPI_REG_RD_INSTR_DUMMY_LSB;
+ }
+
+ writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+
+ /* Set address width */
+ reg = readl(reg_base + CQSPI_REG_SIZE);
+ reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+ reg |= (nor->addr_width - 1);
+ writel(reg, reg_base + CQSPI_REG_SIZE);
+ return 0;
+}
+
+static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
+ loff_t from_addr, const size_t n_rx)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ void __iomem *ahb_base = cqspi->ahb_base;
+ unsigned int remaining = n_rx;
+ unsigned int mod_bytes = n_rx % 4;
+ unsigned int bytes_to_read = 0;
+ u8 *rxbuf_end = rxbuf + n_rx;
+ int ret = 0;
+
+ writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
+ writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
+
+ /* Clear all interrupts. */
+ writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
+
+ writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
+
+ reinit_completion(&cqspi->transfer_complete);
+ writel(CQSPI_REG_INDIRECTRD_START_MASK,
+ reg_base + CQSPI_REG_INDIRECTRD);
+
+ while (remaining > 0) {
+ if (!wait_for_completion_timeout(&cqspi->transfer_complete,
+ msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
+ ret = -ETIMEDOUT;
+
+ bytes_to_read = cqspi_get_rd_sram_level(cqspi);
+
+ if (ret && bytes_to_read == 0) {
+ dev_err(nor->dev, "Indirect read timeout, no bytes\n");
+ goto failrd;
+ }
+
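+ /*
+ * Drain the SRAM: the level is reported in FIFO words, so scale it
+ * by fifo_width to get bytes, copy out whole 32-bit words, and pick
+ * up a trailing partial word (n_rx % 4 bytes) with one final
+ * 32-bit read.
+ */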
+ while (bytes_to_read != 0) {
+ unsigned int word_remain = round_down(remaining, 4);
+
+ bytes_to_read *= cqspi->fifo_width;
+ bytes_to_read = bytes_to_read > remaining ?
+ remaining : bytes_to_read;
+ bytes_to_read = round_down(bytes_to_read, 4);
+ /* Read 4 byte word chunks then single bytes */
+ if (bytes_to_read) {
+ ioread32_rep(ahb_base, rxbuf,
+ (bytes_to_read / 4));
+ } else if (!word_remain && mod_bytes) {
+ unsigned int temp = ioread32(ahb_base);
+
+ bytes_to_read = mod_bytes;
+ memcpy(rxbuf, &temp, min((unsigned int)
+ (rxbuf_end - rxbuf),
+ bytes_to_read));
+ }
+ rxbuf += bytes_to_read;
+ remaining -= bytes_to_read;
+ bytes_to_read = cqspi_get_rd_sram_level(cqspi);
+ }
+
+ if (remaining > 0)
+ reinit_completion(&cqspi->transfer_complete);
+ }
+
+ /* Check indirect done status */
+ ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
+ CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
+ if (ret) {
+ dev_err(nor->dev,
+ "Indirect read completion error (%i)\n", ret);
+ goto failrd;
+ }
+
+ /* Disable interrupt */
+ writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+ /* Clear indirect completion status */
+ writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);
+
+ return 0;
+
+failrd:
+ /* Disable interrupt */
+ writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+ /* Cancel the indirect read */
+ writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
+ reg_base + CQSPI_REG_INDIRECTRD);
+ return ret;
+}
+
+static int cqspi_write_setup(struct spi_nor *nor)
+{
+ unsigned int reg;
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+
+ /* Set opcode. */
+ reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
+ writel(reg, reg_base + CQSPI_REG_WR_INSTR);
+ reg = cqspi_calc_rdreg(nor, nor->program_opcode);
+ writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+
+ reg = readl(reg_base + CQSPI_REG_SIZE);
+ reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+ reg |= (nor->addr_width - 1);
+ writel(reg, reg_base + CQSPI_REG_SIZE);
+ return 0;
+}
+
+static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
+ const u8 *txbuf, const size_t n_tx)
+{
+ const unsigned int page_size = nor->page_size;
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int remaining = n_tx;
+ unsigned int write_bytes;
+ int ret;
+
+ writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
+ writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);
+
+ /* Clear all interrupts. */
+ writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
+
+ writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);
+
+ reinit_completion(&cqspi->transfer_complete);
+ writel(CQSPI_REG_INDIRECTWR_START_MASK,
+ reg_base + CQSPI_REG_INDIRECTWR);
+ /*
+ * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
+ * Controller programming sequence, a couple of cycles of
+ * QSPI_REF_CLK delay are required for the above bit to
+ * be internally synchronized by the QSPI module. Provide 5
+ * cycles of delay.
+ */
+ if (cqspi->wr_delay)
+ ndelay(cqspi->wr_delay);
+
+ while (remaining > 0) {
+ size_t write_words, mod_bytes;
+
+ write_bytes = remaining > page_size ? page_size : remaining;
+ write_words = write_bytes / 4;
+ mod_bytes = write_bytes % 4;
+ /* Write 4 bytes at a time then single bytes. */
+ if (write_words) {
+ iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
+ txbuf += (write_words * 4);
+ }
+ if (mod_bytes) {
+ unsigned int temp = 0xFFFFFFFF;
+
+ memcpy(&temp, txbuf, mod_bytes);
+ iowrite32(temp, cqspi->ahb_base);
+ txbuf += mod_bytes;
+ }
+
+ if (!wait_for_completion_timeout(&cqspi->transfer_complete,
+ msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
+ dev_err(nor->dev, "Indirect write timeout\n");
+ ret = -ETIMEDOUT;
+ goto failwr;
+ }
+
+ remaining -= write_bytes;
+
+ if (remaining > 0)
+ reinit_completion(&cqspi->transfer_complete);
+ }
+
+ /* Check indirect done status */
+ ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
+ CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
+ if (ret) {
+ dev_err(nor->dev,
+ "Indirect write completion error (%i)\n", ret);
+ goto failwr;
+ }
+
+ /* Disable interrupt. */
+ writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+ /* Clear indirect completion status */
+ writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);
+
+ cqspi_wait_idle(cqspi);
+
+ return 0;
+
+failwr:
+ /* Disable interrupt. */
+ writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+ /* Cancel the indirect write */
+ writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
+ reg_base + CQSPI_REG_INDIRECTWR);
+ return ret;
+}
+
+static void cqspi_chipselect(struct spi_nor *nor)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int chip_select = f_pdata->cs;
+ unsigned int reg;
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+ if (cqspi->is_decoded_cs) {
+ reg |= CQSPI_REG_CONFIG_DECODE_MASK;
+ } else {
+ reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;
+
+ /* Convert the CS if there is no decoder:
+ * CS0 to 4b'1110
+ * CS1 to 4b'1101
+ * CS2 to 4b'1011
+ * CS3 to 4b'0111
+ */
+ chip_select = 0xF & ~(1 << chip_select);
+ }
+
+ reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
+ << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
+ reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
+ << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+}
+
+static void cqspi_configure_cs_and_sizes(struct spi_nor *nor)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *iobase = cqspi->iobase;
+ unsigned int reg;
+
+ /* configure page size and block size. */
+ reg = readl(iobase + CQSPI_REG_SIZE);
+ reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
+ reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
+ reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+ reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB);
+ reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB);
+ reg |= (nor->addr_width - 1);
+ writel(reg, iobase + CQSPI_REG_SIZE);
+
+ /* configure the chip select */
+ cqspi_chipselect(nor);
+
+ /* Store the new configuration of the controller */
+ cqspi->current_page_size = nor->page_size;
+ cqspi->current_erase_size = nor->mtd.erasesize;
+ cqspi->current_addr_width = nor->addr_width;
+}
+
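+/*
+ * Worked example (illustrative): with a 500 MHz reference clock and
+ * ns_val = 50, ticks = DIV_ROUND_UP(500000 * 50, 1000000) = 25, i.e. 25
+ * reference-clock ticks (2 ns each) cover the requested 50 ns.
+ */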
+static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
+ const unsigned int ns_val)
+{
+ unsigned int ticks;
+
+ ticks = ref_clk_hz / 1000; /* kHz */
+ ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);
+
+ return ticks;
+}
+
+static void cqspi_delay(struct spi_nor *nor)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *iobase = cqspi->iobase;
+ const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
+ unsigned int tshsl, tchsh, tslch, tsd2d;
+ unsigned int reg;
+ unsigned int tsclk;
+
+ /* calculate the number of ref ticks for one sclk tick */
+ tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);
+
+ tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
+ /* this particular value must be at least one sclk */
+ if (tshsl < tsclk)
+ tshsl = tsclk;
+
+ tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
+ tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
+ tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);
+
+ reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
+ << CQSPI_REG_DELAY_TSHSL_LSB;
+ reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
+ << CQSPI_REG_DELAY_TCHSH_LSB;
+ reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
+ << CQSPI_REG_DELAY_TSLCH_LSB;
+ reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
+ << CQSPI_REG_DELAY_TSD2D_LSB;
+ writel(reg, iobase + CQSPI_REG_DELAY);
+}
+
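+/*
+ * Worked example (illustrative): the baud field divides the reference
+ * clock by 2 * (div + 1). With ref_clk_hz = 400 MHz and sclk = 50 MHz,
+ * div = DIV_ROUND_UP(400 MHz, 2 * 50 MHz) - 1 = 3, i.e. divide-by-8,
+ * giving exactly 50 MHz on the SPI clock.
+ */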
+static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
+{
+ const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
+ void __iomem *reg_base = cqspi->iobase;
+ u32 reg, div;
+
+ /* Recalculate the baudrate divisor based on QSPI specification. */
+ div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+ reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
+ reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+}
+
+static void cqspi_readdata_capture(struct cqspi_st *cqspi,
+ const bool bypass,
+ const unsigned int delay)
+{
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+
+ reg = readl(reg_base + CQSPI_REG_READCAPTURE);
+
+ if (bypass)
+ reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
+ else
+ reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
+
+ reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
+ << CQSPI_REG_READCAPTURE_DELAY_LSB);
+
+ reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
+ << CQSPI_REG_READCAPTURE_DELAY_LSB;
+
+ writel(reg, reg_base + CQSPI_REG_READCAPTURE);
+}
+
+static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
+{
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+
+ if (enable)
+ reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
+ else
+ reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;
+
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+}
+
+static void cqspi_configure(struct spi_nor *nor)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ const unsigned int sclk = f_pdata->clk_rate;
+ int switch_cs = (cqspi->current_cs != f_pdata->cs);
+ int switch_ck = (cqspi->sclk != sclk);
+
+ if ((cqspi->current_page_size != nor->page_size) ||
+ (cqspi->current_erase_size != nor->mtd.erasesize) ||
+ (cqspi->current_addr_width != nor->addr_width))
+ switch_cs = 1;
+
+ if (switch_cs || switch_ck)
+ cqspi_controller_enable(cqspi, 0);
+
+ /* Switch chip select. */
+ if (switch_cs) {
+ cqspi->current_cs = f_pdata->cs;
+ cqspi_configure_cs_and_sizes(nor);
+ }
+
+ /* Setup baudrate divisor and delays */
+ if (switch_ck) {
+ cqspi->sclk = sclk;
+ cqspi_config_baudrate_div(cqspi);
+ cqspi_delay(nor);
+ cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
+ f_pdata->read_delay);
+ }
+
+ if (switch_cs || switch_ck)
+ cqspi_controller_enable(cqspi, 1);
+}
+
+static int cqspi_set_protocol(struct spi_nor *nor, const int read)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+
+ f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
+ f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
+ f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
+
+ if (read) {
+ switch (nor->read_proto) {
+ case SNOR_PROTO_1_1_1:
+ f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
+ break;
+ case SNOR_PROTO_1_1_2:
+ f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
+ break;
+ case SNOR_PROTO_1_1_4:
+ f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ cqspi_configure(nor);
+
+ return 0;
+}
+
+static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
+ size_t len, const u_char *buf)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ int ret;
+
+ ret = cqspi_set_protocol(nor, 0);
+ if (ret)
+ return ret;
+
+ ret = cqspi_write_setup(nor);
+ if (ret)
+ return ret;
+
+ if (f_pdata->use_direct_mode) {
+ memcpy_toio(cqspi->ahb_base + to, buf, len);
+ ret = cqspi_wait_idle(cqspi);
+ } else {
+ ret = cqspi_indirect_write_execute(nor, to, buf, len);
+ }
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static void cqspi_rx_dma_callback(void *param)
+{
+ struct cqspi_st *cqspi = param;
+
+ complete(&cqspi->rx_dma_complete);
+}
+
+static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
+ loff_t from, size_t len)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from;
+ int ret = 0;
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+ dma_addr_t dma_dst;
+
+ if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
+ memcpy_fromio(buf, cqspi->ahb_base + from, len);
+ return 0;
+ }
+
+ dma_dst = dma_map_single(nor->dev, buf, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(nor->dev, dma_dst)) {
+ dev_err(nor->dev, "dma mapping failed\n");
+ return -ENOMEM;
+ }
+ tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src,
+ len, flags);
+ if (!tx) {
+ dev_err(nor->dev, "device_prep_dma_memcpy error\n");
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ tx->callback = cqspi_rx_dma_callback;
+ tx->callback_param = cqspi;
+ cookie = tx->tx_submit(tx);
+ reinit_completion(&cqspi->rx_dma_complete);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(nor->dev, "dma_submit_error %d\n", cookie);
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ dma_async_issue_pending(cqspi->rx_chan);
+ if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
+ msecs_to_jiffies(len))) {
+ dmaengine_terminate_sync(cqspi->rx_chan);
+ dev_err(nor->dev, "DMA wait_for_completion_timeout\n");
+ ret = -ETIMEDOUT;
+ goto err_unmap;
+ }
+
+err_unmap:
+ dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE);
+
+ return ret;
+}
+
+static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
+ size_t len, u_char *buf)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ int ret;
+
+ ret = cqspi_set_protocol(nor, 1);
+ if (ret)
+ return ret;
+
+ ret = cqspi_read_setup(nor);
+ if (ret)
+ return ret;
+
+ if (f_pdata->use_direct_mode)
+ ret = cqspi_direct_read_execute(nor, buf, from, len);
+ else
+ ret = cqspi_indirect_read_execute(nor, buf, from, len);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static int cqspi_erase(struct spi_nor *nor, loff_t offs)
+{
+ int ret;
+
+ ret = cqspi_set_protocol(nor, 0);
+ if (ret)
+ return ret;
+
+ /* Send write enable, then erase commands. */
+ ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
+ if (ret)
+ return ret;
+
+ /* Set up command buffer. */
+ ret = cqspi_command_write_addr(nor, nor->erase_opcode, offs);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+
+ mutex_lock(&cqspi->bus_mutex);
+
+ return 0;
+}
+
+static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+
+ mutex_unlock(&cqspi->bus_mutex);
+}
+
+static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ int ret;
+
+ ret = cqspi_set_protocol(nor, 0);
+ if (!ret)
+ ret = cqspi_command_read(nor, &opcode, 1, buf, len);
+
+ return ret;
+}
+
+static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ int ret;
+
+ ret = cqspi_set_protocol(nor, 0);
+ if (!ret)
+ ret = cqspi_command_write(nor, opcode, buf, len);
+
+ return ret;
+}
+
+static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
+ struct cqspi_flash_pdata *f_pdata,
+ struct device_node *np)
+{
+ if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
+ dev_err(&pdev->dev, "couldn't determine read-delay\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
+ dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
+ dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
+ dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
+ dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
+ dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int cqspi_of_get_pdata(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct cqspi_st *cqspi = platform_get_drvdata(pdev);
+
+ cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");
+
+ if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
+ dev_err(&pdev->dev, "couldn't determine fifo-depth\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
+ dev_err(&pdev->dev, "couldn't determine fifo-width\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,trigger-address",
+ &cqspi->trigger_address)) {
+ dev_err(&pdev->dev, "couldn't determine trigger-address\n");
+ return -ENXIO;
+ }
+
+ cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");
+
+ return 0;
+}
+
+static void cqspi_controller_init(struct cqspi_st *cqspi)
+{
+ u32 reg;
+
+ cqspi_controller_enable(cqspi, 0);
+
+ /* Configure the remap address register, no remap */
+ writel(0, cqspi->iobase + CQSPI_REG_REMAP);
+
+ /* Disable all interrupts. */
+ writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);
+
+ /* Configure the SRAM split to 1:1. */
+ writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);
+
+ /* Load indirect trigger address. */
+ writel(cqspi->trigger_address,
+ cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);
+
+ /* Program read watermark -- 1/2 of the FIFO. */
+ writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
+ cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
+ /* Program write watermark -- 1/8 of the FIFO. */
+ writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
+ cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
+
+ /* Enable Direct Access Controller */
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
+ cqspi_controller_enable(cqspi, 1);
+}
+
+static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
+{
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ cqspi->rx_chan = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(cqspi->rx_chan)) {
+ dev_err(&cqspi->pdev->dev, "No Rx DMA available\n");
+ cqspi->rx_chan = NULL;
+ }
+ init_completion(&cqspi->rx_dma_complete);
+}
+
+static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
+{
+ const struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_READ_1_1_2 |
+ SNOR_HWCAPS_READ_1_1_4 |
+ SNOR_HWCAPS_PP,
+ };
+ struct platform_device *pdev = cqspi->pdev;
+ struct device *dev = &pdev->dev;
+ struct cqspi_flash_pdata *f_pdata;
+ struct spi_nor *nor;
+ struct mtd_info *mtd;
+ unsigned int cs;
+ int i, ret;
+
+ /* Get flash device data */
+ for_each_available_child_of_node(dev->of_node, np) {
+ ret = of_property_read_u32(np, "reg", &cs);
+ if (ret) {
+ dev_err(dev, "Couldn't determine chip select.\n");
+ goto err;
+ }
+
+ if (cs >= CQSPI_MAX_CHIPSELECT) {
+ ret = -EINVAL;
+ dev_err(dev, "Chip select %d out of range.\n", cs);
+ goto err;
+ }
+
+ f_pdata = &cqspi->f_pdata[cs];
+ f_pdata->cqspi = cqspi;
+ f_pdata->cs = cs;
+
+ ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
+ if (ret)
+ goto err;
+
+ nor = &f_pdata->nor;
+ mtd = &nor->mtd;
+
+ mtd->priv = nor;
+
+ nor->dev = dev;
+ spi_nor_set_flash_node(nor, np);
+ nor->priv = f_pdata;
+
+ nor->read_reg = cqspi_read_reg;
+ nor->write_reg = cqspi_write_reg;
+ nor->read = cqspi_read;
+ nor->write = cqspi_write;
+ nor->erase = cqspi_erase;
+ nor->prepare = cqspi_prep;
+ nor->unprepare = cqspi_unprep;
+
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d",
+ dev_name(dev), cs);
+ if (!mtd->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = spi_nor_scan(nor, NULL, &hwcaps);
+ if (ret)
+ goto err;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto err;
+
+ f_pdata->registered = true;
+
+ if (mtd->size <= cqspi->ahb_size) {
+ f_pdata->use_direct_mode = true;
+ dev_dbg(nor->dev, "using direct mode for %s\n",
+ mtd->name);
+
+ if (!cqspi->rx_chan)
+ cqspi_request_mmap_dma(cqspi);
+ }
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
+ if (cqspi->f_pdata[i].registered)
+ mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
+ return ret;
+}
+
+static int cqspi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct cqspi_st *cqspi;
+ struct resource *res;
+ struct resource *res_ahb;
+ unsigned long data;
+ int ret;
+ int irq;
+
+ cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL);
+ if (!cqspi)
+ return -ENOMEM;
+
+ mutex_init(&cqspi->bus_mutex);
+ cqspi->pdev = pdev;
+ platform_set_drvdata(pdev, cqspi);
+
+ /* Obtain configuration from OF. */
+ ret = cqspi_of_get_pdata(pdev);
+ if (ret) {
+ dev_err(dev, "Cannot get mandatory OF data.\n");
+ return -ENODEV;
+ }
+
+ /* Obtain QSPI clock. */
+ cqspi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(cqspi->clk)) {
+ dev_err(dev, "Cannot claim QSPI clock.\n");
+ return PTR_ERR(cqspi->clk);
+ }
+
+ /* Obtain and remap controller address. */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cqspi->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cqspi->iobase)) {
+ dev_err(dev, "Cannot remap controller address.\n");
+ return PTR_ERR(cqspi->iobase);
+ }
+
+ /* Obtain and remap AHB address. */
+ res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
+ if (IS_ERR(cqspi->ahb_base)) {
+ dev_err(dev, "Cannot remap AHB address.\n");
+ return PTR_ERR(cqspi->ahb_base);
+ }
+ cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
+ cqspi->ahb_size = resource_size(res_ahb);
+
+ init_completion(&cqspi->transfer_complete);
+
+ /* Obtain IRQ line. */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Cannot obtain IRQ.\n");
+ return -ENXIO;
+ }
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(cqspi->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable QSPI clock.\n");
+ goto probe_clk_failed;
+ }
+
+ cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
+ data = (unsigned long)of_device_get_match_data(dev);
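+ /*
+ * Illustrative note: wr_delay below is five reference-clock periods
+ * expressed in nanoseconds, e.g. a 500 MHz reference clock gives
+ * 5 * DIV_ROUND_UP(1000000000, 500000000) = 10 ns for ndelay().
+ */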
+ if (data & CQSPI_NEEDS_WR_DELAY)
+ cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
+ cqspi->master_ref_clk_hz);
+
+ ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
+ pdev->name, cqspi);
+ if (ret) {
+ dev_err(dev, "Cannot request IRQ.\n");
+ goto probe_irq_failed;
+ }
+
+ cqspi_wait_idle(cqspi);
+ cqspi_controller_init(cqspi);
+ cqspi->current_cs = -1;
+ cqspi->sclk = 0;
+
+ ret = cqspi_setup_flash(cqspi, np);
+ if (ret) {
+ dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret);
+ goto probe_setup_failed;
+ }
+
+ return ret;
+probe_setup_failed:
+ cqspi_controller_enable(cqspi, 0);
+probe_irq_failed:
+ clk_disable_unprepare(cqspi->clk);
+probe_clk_failed:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int cqspi_remove(struct platform_device *pdev)
+{
+ struct cqspi_st *cqspi = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
+ if (cqspi->f_pdata[i].registered)
+ mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
+
+ cqspi_controller_enable(cqspi, 0);
+
+ if (cqspi->rx_chan)
+ dma_release_channel(cqspi->rx_chan);
+
+ clk_disable_unprepare(cqspi->clk);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cqspi_suspend(struct device *dev)
+{
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+ cqspi_controller_enable(cqspi, 0);
+ return 0;
+}
+
+static int cqspi_resume(struct device *dev)
+{
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+ cqspi_controller_enable(cqspi, 1);
+ return 0;
+}
+
+static const struct dev_pm_ops cqspi_dev_pm_ops = {
+ .suspend = cqspi_suspend,
+ .resume = cqspi_resume,
+};
+
+#define CQSPI_DEV_PM_OPS (&cqspi_dev_pm_ops)
+#else
+#define CQSPI_DEV_PM_OPS NULL
+#endif
+
+static const struct of_device_id cqspi_dt_ids[] = {
+ {
+ .compatible = "cdns,qspi-nor",
+ .data = (void *)0,
+ },
+ {
+ .compatible = "ti,k2g-qspi",
+ .data = (void *)CQSPI_NEEDS_WR_DELAY,
+ },
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, cqspi_dt_ids);
+
+static struct platform_driver cqspi_platform_driver = {
+ .probe = cqspi_probe,
+ .remove = cqspi_remove,
+ .driver = {
+ .name = CQSPI_NAME,
+ .pm = CQSPI_DEV_PM_OPS,
+ .of_match_table = cqspi_dt_ids,
+ },
+};
+
+module_platform_driver(cqspi_platform_driver);
+
+MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" CQSPI_NAME);
+MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
+MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
new file mode 100644
index 000000000..1ff3430f8
--- /dev/null
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -0,0 +1,1224 @@
+/*
+ * Freescale QuadSPI driver.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/completion.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/mutex.h>
+#include <linux/pm_qos.h>
+#include <linux/sizes.h>
+
+/* Controller needs the driver to swap endianness */
+#define QUADSPI_QUIRK_SWAP_ENDIAN (1 << 0)
+/* Controller needs 4x internal clock */
+#define QUADSPI_QUIRK_4X_INT_CLK (1 << 1)
+/*
+ * TKT253890: the controller needs the driver to fill the TX FIFO up to
+ * 16 bytes to trigger a data transfer, even though the extra data will
+ * not be transferred.
+ */
+#define QUADSPI_QUIRK_TKT253890 (1 << 2)
+/* Controller cannot wake up from wait mode, TKT245618 */
+#define QUADSPI_QUIRK_TKT245618 (1 << 3)
+
+/* The registers */
+#define QUADSPI_MCR 0x00
+#define QUADSPI_MCR_RESERVED_SHIFT 16
+#define QUADSPI_MCR_RESERVED_MASK (0xF << QUADSPI_MCR_RESERVED_SHIFT)
+#define QUADSPI_MCR_MDIS_SHIFT 14
+#define QUADSPI_MCR_MDIS_MASK (1 << QUADSPI_MCR_MDIS_SHIFT)
+#define QUADSPI_MCR_CLR_TXF_SHIFT 11
+#define QUADSPI_MCR_CLR_TXF_MASK (1 << QUADSPI_MCR_CLR_TXF_SHIFT)
+#define QUADSPI_MCR_CLR_RXF_SHIFT 10
+#define QUADSPI_MCR_CLR_RXF_MASK (1 << QUADSPI_MCR_CLR_RXF_SHIFT)
+#define QUADSPI_MCR_DDR_EN_SHIFT 7
+#define QUADSPI_MCR_DDR_EN_MASK (1 << QUADSPI_MCR_DDR_EN_SHIFT)
+#define QUADSPI_MCR_END_CFG_SHIFT 2
+#define QUADSPI_MCR_END_CFG_MASK (3 << QUADSPI_MCR_END_CFG_SHIFT)
+#define QUADSPI_MCR_SWRSTHD_SHIFT 1
+#define QUADSPI_MCR_SWRSTHD_MASK (1 << QUADSPI_MCR_SWRSTHD_SHIFT)
+#define QUADSPI_MCR_SWRSTSD_SHIFT 0
+#define QUADSPI_MCR_SWRSTSD_MASK (1 << QUADSPI_MCR_SWRSTSD_SHIFT)
+
+#define QUADSPI_IPCR 0x08
+#define QUADSPI_IPCR_SEQID_SHIFT 24
+#define QUADSPI_IPCR_SEQID_MASK (0xF << QUADSPI_IPCR_SEQID_SHIFT)
+
+#define QUADSPI_BUF0CR 0x10
+#define QUADSPI_BUF1CR 0x14
+#define QUADSPI_BUF2CR 0x18
+#define QUADSPI_BUFXCR_INVALID_MSTRID 0xe
+
+#define QUADSPI_BUF3CR 0x1c
+#define QUADSPI_BUF3CR_ALLMST_SHIFT 31
+#define QUADSPI_BUF3CR_ALLMST_MASK (1 << QUADSPI_BUF3CR_ALLMST_SHIFT)
+#define QUADSPI_BUF3CR_ADATSZ_SHIFT 8
+#define QUADSPI_BUF3CR_ADATSZ_MASK (0xFF << QUADSPI_BUF3CR_ADATSZ_SHIFT)
+
+#define QUADSPI_BFGENCR 0x20
+#define QUADSPI_BFGENCR_PAR_EN_SHIFT 16
+#define QUADSPI_BFGENCR_PAR_EN_MASK (1 << (QUADSPI_BFGENCR_PAR_EN_SHIFT))
+#define QUADSPI_BFGENCR_SEQID_SHIFT 12
+#define QUADSPI_BFGENCR_SEQID_MASK (0xF << QUADSPI_BFGENCR_SEQID_SHIFT)
+
+#define QUADSPI_BUF0IND 0x30
+#define QUADSPI_BUF1IND 0x34
+#define QUADSPI_BUF2IND 0x38
+#define QUADSPI_SFAR 0x100
+
+#define QUADSPI_SMPR 0x108
+#define QUADSPI_SMPR_DDRSMP_SHIFT 16
+#define QUADSPI_SMPR_DDRSMP_MASK (7 << QUADSPI_SMPR_DDRSMP_SHIFT)
+#define QUADSPI_SMPR_FSDLY_SHIFT 6
+#define QUADSPI_SMPR_FSDLY_MASK (1 << QUADSPI_SMPR_FSDLY_SHIFT)
+#define QUADSPI_SMPR_FSPHS_SHIFT 5
+#define QUADSPI_SMPR_FSPHS_MASK (1 << QUADSPI_SMPR_FSPHS_SHIFT)
+#define QUADSPI_SMPR_HSENA_SHIFT 0
+#define QUADSPI_SMPR_HSENA_MASK (1 << QUADSPI_SMPR_HSENA_SHIFT)
+
+#define QUADSPI_RBSR 0x10c
+#define QUADSPI_RBSR_RDBFL_SHIFT 8
+#define QUADSPI_RBSR_RDBFL_MASK (0x3F << QUADSPI_RBSR_RDBFL_SHIFT)
+
+#define QUADSPI_RBCT 0x110
+#define QUADSPI_RBCT_WMRK_MASK 0x1F
+#define QUADSPI_RBCT_RXBRD_SHIFT 8
+#define QUADSPI_RBCT_RXBRD_USEIPS (0x1 << QUADSPI_RBCT_RXBRD_SHIFT)
+
+#define QUADSPI_TBSR 0x150
+#define QUADSPI_TBDR 0x154
+#define QUADSPI_SR 0x15c
+#define QUADSPI_SR_IP_ACC_SHIFT 1
+#define QUADSPI_SR_IP_ACC_MASK (0x1 << QUADSPI_SR_IP_ACC_SHIFT)
+#define QUADSPI_SR_AHB_ACC_SHIFT 2
+#define QUADSPI_SR_AHB_ACC_MASK (0x1 << QUADSPI_SR_AHB_ACC_SHIFT)
+
+#define QUADSPI_FR 0x160
+#define QUADSPI_FR_TFF_MASK 0x1
+
+#define QUADSPI_SFA1AD 0x180
+#define QUADSPI_SFA2AD 0x184
+#define QUADSPI_SFB1AD 0x188
+#define QUADSPI_SFB2AD 0x18c
+#define QUADSPI_RBDR 0x200
+
+#define QUADSPI_LUTKEY 0x300
+#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0
+
+#define QUADSPI_LCKCR 0x304
+#define QUADSPI_LCKER_LOCK 0x1
+#define QUADSPI_LCKER_UNLOCK 0x2
+
+#define QUADSPI_RSER 0x164
+#define QUADSPI_RSER_TFIE (0x1 << 0)
+
+#define QUADSPI_LUT_BASE 0x310
+
+/*
+ * The definition of the LUT register is shown below:
+ *
+ * ---------------------------------------------------
+ * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
+ * ---------------------------------------------------
+ */
+#define OPRND0_SHIFT 0
+#define PAD0_SHIFT 8
+#define INSTR0_SHIFT 10
+#define OPRND1_SHIFT 16
+
+/* Instruction set for the LUT register. */
+#define LUT_STOP 0
+#define LUT_CMD 1
+#define LUT_ADDR 2
+#define LUT_DUMMY 3
+#define LUT_MODE 4
+#define LUT_MODE2 5
+#define LUT_MODE4 6
+#define LUT_FSL_READ 7
+#define LUT_FSL_WRITE 8
+#define LUT_JMP_ON_CS 9
+#define LUT_ADDR_DDR 10
+#define LUT_MODE_DDR 11
+#define LUT_MODE2_DDR 12
+#define LUT_MODE4_DDR 13
+#define LUT_FSL_READ_DDR 14
+#define LUT_FSL_WRITE_DDR 15
+#define LUT_DATA_LEARN 16
+
+/*
+ * The PAD definitions for the LUT register.
+ *
+ * The pad encodes the number of IO lines used, IO[0:3].
+ * For example, a Quad read needs four IO lines, so you should
+ * set LUT_PAD4, which means we use four IO lines.
+ */
+#define LUT_PAD1 0
+#define LUT_PAD2 1
+#define LUT_PAD4 2
+
+/* Operands for the LUT register. */
+#define ADDR24BIT 0x18
+#define ADDR32BIT 0x20
+
+/* Macros for constructing the LUT register. */
+#define LUT0(ins, pad, opr) \
+ (((opr) << OPRND0_SHIFT) | ((LUT_##pad) << PAD0_SHIFT) | \
+ ((LUT_##ins) << INSTR0_SHIFT))
+
+#define LUT1(ins, pad, opr) (LUT0(ins, pad, opr) << OPRND1_SHIFT)
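+
+/*
+ * Worked example (illustrative): LUT0(CMD, PAD1, SPINOR_OP_WREN), with
+ * SPINOR_OP_WREN == 0x06, expands to (0x06 << 0) | (LUT_PAD1 << 8) |
+ * (LUT_CMD << 10) == 0x406; LUT1() packs a second instruction/pad/operand
+ * triple into bits 16-31 of the same register.
+ */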
+
+/* other macros for LUT register. */
+#define QUADSPI_LUT(x) (QUADSPI_LUT_BASE + (x) * 4)
+#define QUADSPI_LUT_NUM 64
+
+/* SEQID -- we can have 16 seqids at most. */
+#define SEQID_READ 0
+#define SEQID_WREN 1
+#define SEQID_WRDI 2
+#define SEQID_RDSR 3
+#define SEQID_SE 4
+#define SEQID_CHIP_ERASE 5
+#define SEQID_PP 6
+#define SEQID_RDID 7
+#define SEQID_WRSR 8
+#define SEQID_RDCR 9
+#define SEQID_EN4B 10
+#define SEQID_BRWR 11
+
+#define QUADSPI_MIN_IOMAP SZ_4M
+
+enum fsl_qspi_devtype {
+ FSL_QUADSPI_VYBRID,
+ FSL_QUADSPI_IMX6SX,
+ FSL_QUADSPI_IMX7D,
+ FSL_QUADSPI_IMX6UL,
+ FSL_QUADSPI_LS1021A,
+ FSL_QUADSPI_LS2080A,
+};
+
+struct fsl_qspi_devtype_data {
+ enum fsl_qspi_devtype devtype;
+ int rxfifo;
+ int txfifo;
+ int ahb_buf_size;
+ int driver_data;
+};
+
+static const struct fsl_qspi_devtype_data vybrid_data = {
+ .devtype = FSL_QUADSPI_VYBRID,
+ .rxfifo = 128,
+ .txfifo = 64,
+ .ahb_buf_size = 1024,
+ .driver_data = QUADSPI_QUIRK_SWAP_ENDIAN,
+};
+
+static const struct fsl_qspi_devtype_data imx6sx_data = {
+ .devtype = FSL_QUADSPI_IMX6SX,
+ .rxfifo = 128,
+ .txfifo = 512,
+ .ahb_buf_size = 1024,
+ .driver_data = QUADSPI_QUIRK_4X_INT_CLK
+ | QUADSPI_QUIRK_TKT245618,
+};
+
+static const struct fsl_qspi_devtype_data imx7d_data = {
+ .devtype = FSL_QUADSPI_IMX7D,
+ .rxfifo = 512,
+ .txfifo = 512,
+ .ahb_buf_size = 1024,
+ .driver_data = QUADSPI_QUIRK_TKT253890
+ | QUADSPI_QUIRK_4X_INT_CLK,
+};
+
+static const struct fsl_qspi_devtype_data imx6ul_data = {
+ .devtype = FSL_QUADSPI_IMX6UL,
+ .rxfifo = 128,
+ .txfifo = 512,
+ .ahb_buf_size = 1024,
+ .driver_data = QUADSPI_QUIRK_TKT253890
+ | QUADSPI_QUIRK_4X_INT_CLK,
+};
+
+static const struct fsl_qspi_devtype_data ls1021a_data = {
+ .devtype = FSL_QUADSPI_LS1021A,
+ .rxfifo = 128,
+ .txfifo = 64,
+ .ahb_buf_size = 1024,
+ .driver_data = 0,
+};
+
+static const struct fsl_qspi_devtype_data ls2080a_data = {
+ .devtype = FSL_QUADSPI_LS2080A,
+ .rxfifo = 128,
+ .txfifo = 64,
+ .ahb_buf_size = 1024,
+ .driver_data = QUADSPI_QUIRK_TKT253890,
+};
+
+#define FSL_QSPI_MAX_CHIP 4
+struct fsl_qspi {
+ struct spi_nor nor[FSL_QSPI_MAX_CHIP];
+ void __iomem *iobase;
+ void __iomem *ahb_addr;
+ u32 memmap_phy;
+ u32 memmap_offs;
+ u32 memmap_len;
+ struct clk *clk, *clk_en;
+ struct device *dev;
+ struct completion c;
+ const struct fsl_qspi_devtype_data *devtype_data;
+ u32 nor_size;
+ u32 nor_num;
+ u32 clk_rate;
+ unsigned int chip_base_addr; /* We may support two chips. */
+ bool has_second_chip;
+ bool big_endian;
+ struct mutex lock;
+ struct pm_qos_request pm_qos_req;
+};
+
+static inline int needs_swap_endian(struct fsl_qspi *q)
+{
+ return q->devtype_data->driver_data & QUADSPI_QUIRK_SWAP_ENDIAN;
+}
+
+static inline int needs_4x_clock(struct fsl_qspi *q)
+{
+ return q->devtype_data->driver_data & QUADSPI_QUIRK_4X_INT_CLK;
+}
+
+static inline int needs_fill_txfifo(struct fsl_qspi *q)
+{
+ return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT253890;
+}
+
+static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
+{
+ return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT245618;
+}
+
+/*
+ * R/W functions for big- or little-endian registers:
+ * The QSPI controller's endianness is independent of the CPU core's.
+ * Even though the CPU core is little-endian, the QSPI controller comes
+ * in both big-endian and little-endian versions.
+ */
+static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
+{
+ if (q->big_endian)
+ iowrite32be(val, addr);
+ else
+ iowrite32(val, addr);
+}
+
+static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
+{
+ if (q->big_endian)
+ return ioread32be(addr);
+ else
+ return ioread32(addr);
+}
+
+/*
+ * An IC bug forces us to re-arrange the 32-bit data.
+ * Later chips, such as the IMX6SLX, have fixed this bug.
+ */
+static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
+{
+ return needs_swap_endian(q) ? __swab32(a) : a;
+}
+
+static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q)
+{
+ qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
+ qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
+}
+
+static inline void fsl_qspi_lock_lut(struct fsl_qspi *q)
+{
+ qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
+ qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
+}
+
+static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
+{
+ struct fsl_qspi *q = dev_id;
+ u32 reg;
+
+ /* clear interrupt */
+ reg = qspi_readl(q, q->iobase + QUADSPI_FR);
+ qspi_writel(q, reg, q->iobase + QUADSPI_FR);
+
+ if (reg & QUADSPI_FR_TFF_MASK)
+ complete(&q->c);
+
+ dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", q->chip_base_addr, reg);
+ return IRQ_HANDLED;
+}
+
+static void fsl_qspi_init_lut(struct fsl_qspi *q)
+{
+ void __iomem *base = q->iobase;
+ int rxfifo = q->devtype_data->rxfifo;
+ u32 lut_base;
+ int i;
+
+ struct spi_nor *nor = &q->nor[0];
+ u8 addrlen = (nor->addr_width == 3) ? ADDR24BIT : ADDR32BIT;
+ u8 read_op = nor->read_opcode;
+ u8 read_dm = nor->read_dummy;
+
+ fsl_qspi_unlock_lut(q);
+
+ /* Clear all the LUT table */
+ for (i = 0; i < QUADSPI_LUT_NUM; i++)
+ qspi_writel(q, 0, base + QUADSPI_LUT_BASE + i * 4);
+
+ /* Read */
+ lut_base = SEQID_READ * 4;
+
+ qspi_writel(q, LUT0(CMD, PAD1, read_op) | LUT1(ADDR, PAD1, addrlen),
+ base + QUADSPI_LUT(lut_base));
+ qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) |
+ LUT1(FSL_READ, PAD4, rxfifo),
+ base + QUADSPI_LUT(lut_base + 1));
+
+ /* Write enable */
+ lut_base = SEQID_WREN * 4;
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WREN),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Page Program */
+ lut_base = SEQID_PP * 4;
+
+ qspi_writel(q, LUT0(CMD, PAD1, nor->program_opcode) |
+ LUT1(ADDR, PAD1, addrlen),
+ base + QUADSPI_LUT(lut_base));
+ qspi_writel(q, LUT0(FSL_WRITE, PAD1, 0),
+ base + QUADSPI_LUT(lut_base + 1));
+
+ /* Read Status */
+ lut_base = SEQID_RDSR * 4;
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDSR) |
+ LUT1(FSL_READ, PAD1, 0x1),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Erase a sector */
+ lut_base = SEQID_SE * 4;
+
+ qspi_writel(q, LUT0(CMD, PAD1, nor->erase_opcode) |
+ LUT1(ADDR, PAD1, addrlen),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Erase the whole chip */
+ lut_base = SEQID_CHIP_ERASE * 4;
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE),
+ base + QUADSPI_LUT(lut_base));
+
+ /* READ ID */
+ lut_base = SEQID_RDID * 4;
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDID) |
+ LUT1(FSL_READ, PAD1, 0x8),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Write Register */
+ lut_base = SEQID_WRSR * 4;
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRSR) |
+ LUT1(FSL_WRITE, PAD1, 0x2),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Read Configuration Register */
+ lut_base = SEQID_RDCR * 4;
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDCR) |
+ LUT1(FSL_READ, PAD1, 0x1),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Write disable */
+ lut_base = SEQID_WRDI * 4;
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRDI),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Enter 4 Byte Mode (Micron) */
+ lut_base = SEQID_EN4B * 4;
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_EN4B),
+ base + QUADSPI_LUT(lut_base));
+
+ /* Enter 4 Byte Mode (Spansion) */
+ lut_base = SEQID_BRWR * 4;
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_BRWR),
+ base + QUADSPI_LUT(lut_base));
+
+ fsl_qspi_lock_lut(q);
+}
+
+/* Get the SEQID for the command */
+static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
+{
+ switch (cmd) {
+ case SPINOR_OP_READ_1_1_4:
+ case SPINOR_OP_READ_1_1_4_4B:
+ return SEQID_READ;
+ case SPINOR_OP_WREN:
+ return SEQID_WREN;
+ case SPINOR_OP_WRDI:
+ return SEQID_WRDI;
+ case SPINOR_OP_RDSR:
+ return SEQID_RDSR;
+ case SPINOR_OP_SE:
+ return SEQID_SE;
+ case SPINOR_OP_CHIP_ERASE:
+ return SEQID_CHIP_ERASE;
+ case SPINOR_OP_PP:
+ return SEQID_PP;
+ case SPINOR_OP_RDID:
+ return SEQID_RDID;
+ case SPINOR_OP_WRSR:
+ return SEQID_WRSR;
+ case SPINOR_OP_RDCR:
+ return SEQID_RDCR;
+ case SPINOR_OP_EN4B:
+ return SEQID_EN4B;
+ case SPINOR_OP_BRWR:
+ return SEQID_BRWR;
+ default:
+ if (cmd == q->nor[0].erase_opcode)
+ return SEQID_SE;
+ dev_err(q->dev, "Unsupported cmd 0x%.2x\n", cmd);
+ break;
+ }
+ return -EINVAL;
+}
+
+static int
+fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len)
+{
+ void __iomem *base = q->iobase;
+ int seqid;
+ u32 reg, reg2;
+ int err;
+
+ init_completion(&q->c);
+ dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len:%d, cmd:%.2x\n",
+ q->chip_base_addr, addr, len, cmd);
+
+ /* save the reg */
+ reg = qspi_readl(q, base + QUADSPI_MCR);
+
+ qspi_writel(q, q->memmap_phy + q->chip_base_addr + addr,
+ base + QUADSPI_SFAR);
+ qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS,
+ base + QUADSPI_RBCT);
+ qspi_writel(q, reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR);
+
+ do {
+ reg2 = qspi_readl(q, base + QUADSPI_SR);
+ if (reg2 & (QUADSPI_SR_IP_ACC_MASK | QUADSPI_SR_AHB_ACC_MASK)) {
+ udelay(1);
+ dev_dbg(q->dev, "The controller is busy, 0x%x\n", reg2);
+ continue;
+ }
+ break;
+ } while (1);
+
+ /* trigger the LUT now */
+ seqid = fsl_qspi_get_seqid(q, cmd);
+ if (seqid < 0)
+ return seqid;
+
+ qspi_writel(q, (seqid << QUADSPI_IPCR_SEQID_SHIFT) | len,
+ base + QUADSPI_IPCR);
+
+ /* Wait for the interrupt. */
+ if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) {
+ dev_err(q->dev,
+ "cmd 0x%.2x timeout, addr@%.8x, FR:0x%.8x, SR:0x%.8x\n",
+ cmd, addr, qspi_readl(q, base + QUADSPI_FR),
+ qspi_readl(q, base + QUADSPI_SR));
+ err = -ETIMEDOUT;
+ } else {
+ err = 0;
+ }
+
+ /* restore the MCR */
+ qspi_writel(q, reg, base + QUADSPI_MCR);
+
+ return err;
+}
+
+/* Read out the data from the QUADSPI_RBDR buffer registers. */
+static void fsl_qspi_read_data(struct fsl_qspi *q, int len, u8 *rxbuf)
+{
+ u32 tmp;
+ int i = 0;
+
+ while (len > 0) {
+ tmp = qspi_readl(q, q->iobase + QUADSPI_RBDR + i * 4);
+ tmp = fsl_qspi_endian_xchg(q, tmp);
+ dev_dbg(q->dev, "chip addr:0x%.8x, rcv:0x%.8x\n",
+ q->chip_base_addr, tmp);
+
+ if (len >= 4) {
+ *((u32 *)rxbuf) = tmp;
+ rxbuf += 4;
+ } else {
+ memcpy(rxbuf, &tmp, len);
+ break;
+ }
+
+ len -= 4;
+ i++;
+ }
+}
+
+/*
+ * If we have changed the content of the flash by writing or erasing,
+ * we need to invalidate the AHB buffer. If we do not do so, we may read out
+ * the wrong data. The spec tells us reset the AHB domain and Serial Flash
+ * domain at the same time.
+ */
+static inline void fsl_qspi_invalid(struct fsl_qspi *q)
+{
+ u32 reg;
+
+ reg = qspi_readl(q, q->iobase + QUADSPI_MCR);
+ reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
+ qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
+
+ /*
+ * The minimum delay is 1 AHB + 2 SFCK clocks.
+ * A delay of 1 us is enough.
+ */
+ udelay(1);
+
+ reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
+ qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
+}
+
+static ssize_t fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
+ u8 opcode, unsigned int to, u32 *txbuf,
+ unsigned count)
+{
+ int ret, i, j;
+ u32 tmp;
+
+ dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len : %d\n",
+ q->chip_base_addr, to, count);
+
+ /* clear the TX FIFO. */
+ tmp = qspi_readl(q, q->iobase + QUADSPI_MCR);
+ qspi_writel(q, tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR);
+
+ /* fill the TX data to the FIFO */
+ for (j = 0, i = ((count + 3) / 4); j < i; j++) {
+ tmp = fsl_qspi_endian_xchg(q, *txbuf);
+ qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR);
+ txbuf++;
+ }
+
+ /* fill the TX FIFO up to 16 bytes for the i.MX7D */
+ if (needs_fill_txfifo(q))
+ for (; i < 4; i++)
+ qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR);
+
+ /* Trigger it */
+ ret = fsl_qspi_runcmd(q, opcode, to, count);
+
+ if (ret == 0)
+ return count;
+
+ return ret;
+}
+
+static void fsl_qspi_set_map_addr(struct fsl_qspi *q)
+{
+ int nor_size = q->nor_size;
+ void __iomem *base = q->iobase;
+
+ qspi_writel(q, nor_size + q->memmap_phy, base + QUADSPI_SFA1AD);
+ qspi_writel(q, nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD);
+ qspi_writel(q, nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD);
+ qspi_writel(q, nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD);
+}
+
+/*
+ * There are two different ways to read out the data from the flash:
+ * the "IP Command Read" and the "AHB Command Read".
+ *
+ * The IC designers suggest we use the "AHB Command Read", which is
+ * faster than the "IP Command Read". (What's more, there is a bug in
+ * the "IP Command Read" on the Vybrid.)
+ *
+ * After we set up the registers for the "AHB Command Read", we can use
+ * memcpy() to read the data directly. A "missed" access to the buffer
+ * causes the controller to clear the buffer, and use the sequence pointed
+ * by the QUADSPI_BFGENCR[SEQID] to initiate a read from the flash.
+ */
+static int fsl_qspi_init_ahb_read(struct fsl_qspi *q)
+{
+ void __iomem *base = q->iobase;
+ int seqid;
+
+ /* AHB configuration for access buffer 0/1/2 .*/
+ qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR);
+ qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR);
+ qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR);
+ /*
+ * Set ADATSZ with the maximum AHB buffer size to improve the
+ * read performance.
+ */
+ qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
+ ((q->devtype_data->ahb_buf_size / 8)
+ << QUADSPI_BUF3CR_ADATSZ_SHIFT),
+ base + QUADSPI_BUF3CR);
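+ /*
+ * Illustrative note: the divide by 8 above suggests ADATSZ counts
+ * 8-byte units; with the common 1024-byte AHB buffer this programs
+ * 1024 / 8 = 128 into the field.
+ */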
+
+ /* We only use the buffer3 */
+ qspi_writel(q, 0, base + QUADSPI_BUF0IND);
+ qspi_writel(q, 0, base + QUADSPI_BUF1IND);
+ qspi_writel(q, 0, base + QUADSPI_BUF2IND);
+
+ /* Set the default lut sequence for AHB Read. */
+ seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode);
+ if (seqid < 0)
+ return seqid;
+
+ qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT,
+ q->iobase + QUADSPI_BFGENCR);
+
+ return 0;
+}
+
+/* Prepare and enable the QSPI clocks */
+static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
+{
+ int ret;
+
+ ret = clk_prepare_enable(q->clk_en);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(q->clk);
+ if (ret) {
+ clk_disable_unprepare(q->clk_en);
+ return ret;
+ }
+
+ if (needs_wakeup_wait_mode(q))
+ pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0);
+
+ return 0;
+}
+
+/* Disable and unprepare the QSPI clocks */
+static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
+{
+ if (needs_wakeup_wait_mode(q))
+ pm_qos_remove_request(&q->pm_qos_req);
+
+ clk_disable_unprepare(q->clk);
+ clk_disable_unprepare(q->clk_en);
+}
+
+/* We use this function to do some basic init for spi_nor_scan(). */
+static int fsl_qspi_nor_setup(struct fsl_qspi *q)
+{
+ void __iomem *base = q->iobase;
+ u32 reg;
+ int ret;
+
+ /* disable and unprepare the clock to avoid a glitch reaching the controller */
+ fsl_qspi_clk_disable_unprep(q);
+
+ /* Set a default frequency for now; the final rate is set later. */
+ ret = clk_set_rate(q->clk, 66000000);
+ if (ret)
+ return ret;
+
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ return ret;
+
+ /* Reset the module */
+ qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
+ base + QUADSPI_MCR);
+ udelay(1);
+
+ /* Init the LUT table. */
+ fsl_qspi_init_lut(q);
+
+ /* Disable the module */
+ qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
+ base + QUADSPI_MCR);
+
+ reg = qspi_readl(q, base + QUADSPI_SMPR);
+ qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
+ | QUADSPI_SMPR_FSPHS_MASK
+ | QUADSPI_SMPR_HSENA_MASK
+ | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);
+
+ /* Enable the module */
+ qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
+ base + QUADSPI_MCR);
+
+ /* clear all interrupt status */
+ qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR);
+
+ /* enable the interrupt */
+ qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
+
+ return 0;
+}
+
+static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
+{
+ unsigned long rate = q->clk_rate;
+ int ret;
+
+ if (needs_4x_clock(q))
+ rate *= 4;
+
+ /* disable and unprepare the clock to avoid a glitch reaching the controller */
+ fsl_qspi_clk_disable_unprep(q);
+
+ ret = clk_set_rate(q->clk, rate);
+ if (ret)
+ return ret;
+
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ return ret;
+
+ /* Init the LUT table again. */
+ fsl_qspi_init_lut(q);
+
+ /* Init for AHB read */
+ return fsl_qspi_init_ahb_read(q);
+}
+
+static const struct of_device_id fsl_qspi_dt_ids[] = {
+ { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, },
+ { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, },
+ { .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
+ { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
+ { .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, },
+ { .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
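+/*
+ * A minimal device tree sketch for one of the compatibles above (node
+ * name, addresses and frequency are illustrative, not taken from a
+ * real board file; clock phandles are omitted):
+ *
+ *	qspi: quadspi@21e0000 {
+ *		compatible = "fsl,imx6sx-qspi";
+ *		reg = <0x021e0000 0x4000>, <0x60000000 0x10000000>;
+ *		reg-names = "QuadSPI", "QuadSPI-memory";
+ *		clock-names = "qspi_en", "qspi";
+ *
+ *		flash0: flash@0 {
+ *			compatible = "jedec,spi-nor";
+ *			reg = <0>;
+ *			spi-max-frequency = <66000000>;
+ *		};
+ *	};
+ */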
+
+static void fsl_qspi_set_base_addr(struct fsl_qspi *q, struct spi_nor *nor)
+{
+ q->chip_base_addr = q->nor_size * (nor - q->nor);
+}
+
+static int fsl_qspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ int ret;
+ struct fsl_qspi *q = nor->priv;
+
+ ret = fsl_qspi_runcmd(q, opcode, 0, len);
+ if (ret)
+ return ret;
+
+ fsl_qspi_read_data(q, len, buf);
+ return 0;
+}
+
+static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ struct fsl_qspi *q = nor->priv;
+ int ret;
+
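+	/* A command without a data payload, e.g. write-enable or chip erase. */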
+ if (!buf) {
+ ret = fsl_qspi_runcmd(q, opcode, 0, 1);
+ if (ret)
+ return ret;
+
+ if (opcode == SPINOR_OP_CHIP_ERASE)
+ fsl_qspi_invalid(q);
+
+ } else if (len > 0) {
+ ret = fsl_qspi_nor_write(q, nor, opcode, 0,
+ (u32 *)buf, len);
+ if (ret > 0)
+ return 0;
+ } else {
+ dev_err(q->dev, "invalid cmd %d\n", opcode);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static ssize_t fsl_qspi_write(struct spi_nor *nor, loff_t to,
+ size_t len, const u_char *buf)
+{
+ struct fsl_qspi *q = nor->priv;
+ ssize_t ret = fsl_qspi_nor_write(q, nor, nor->program_opcode, to,
+ (u32 *)buf, len);
+
+ /* invalidate the data in the AHB buffer. */
+ fsl_qspi_invalid(q);
+ return ret;
+}
+
+static ssize_t fsl_qspi_read(struct spi_nor *nor, loff_t from,
+ size_t len, u_char *buf)
+{
+ struct fsl_qspi *q = nor->priv;
+ u8 cmd = nor->read_opcode;
+
+ /* If necessary, ioremap the buffer before the AHB read. */
+ if (!q->ahb_addr) {
+ q->memmap_offs = q->chip_base_addr + from;
+ q->memmap_len = len > QUADSPI_MIN_IOMAP ? len : QUADSPI_MIN_IOMAP;
+
+ q->ahb_addr = ioremap_nocache(
+ q->memmap_phy + q->memmap_offs,
+ q->memmap_len);
+ if (!q->ahb_addr) {
+ dev_err(q->dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+ /* remap if the requested data lies outside the current window */
+ } else if (q->chip_base_addr + from < q->memmap_offs
+ || q->chip_base_addr + from + len >
+ q->memmap_offs + q->memmap_len) {
+ iounmap(q->ahb_addr);
+
+ q->memmap_offs = q->chip_base_addr + from;
+ q->memmap_len = len > QUADSPI_MIN_IOMAP ? len : QUADSPI_MIN_IOMAP;
+ q->ahb_addr = ioremap_nocache(
+ q->memmap_phy + q->memmap_offs,
+ q->memmap_len);
+ if (!q->ahb_addr) {
+ dev_err(q->dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+ }
+
+ dev_dbg(q->dev, "cmd [%x],read from %p, len:%zd\n",
+ cmd, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
+ len);
+
+ /* Read out the data directly from the AHB buffer. */
+ memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
+ len);
+
+ return len;
+}
+
+static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs)
+{
+ struct fsl_qspi *q = nor->priv;
+ int ret;
+
+ dev_dbg(nor->dev, "%dKiB at 0x%08x:0x%08x\n",
+ nor->mtd.erasesize / 1024, q->chip_base_addr, (u32)offs);
+
+ ret = fsl_qspi_runcmd(q, nor->erase_opcode, offs, 0);
+ if (ret)
+ return ret;
+
+ fsl_qspi_invalid(q);
+ return 0;
+}
+
+static int fsl_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct fsl_qspi *q = nor->priv;
+ int ret;
+
+ mutex_lock(&q->lock);
+
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ goto err_mutex;
+
+ fsl_qspi_set_base_addr(q, nor);
+ return 0;
+
+err_mutex:
+ mutex_unlock(&q->lock);
+ return ret;
+}
+
+static void fsl_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct fsl_qspi *q = nor->priv;
+
+ fsl_qspi_clk_disable_unprep(q);
+ mutex_unlock(&q->lock);
+}
+
+static int fsl_qspi_probe(struct platform_device *pdev)
+{
+ const struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ_1_1_4 |
+ SNOR_HWCAPS_PP,
+ };
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct fsl_qspi *q;
+ struct resource *res;
+ struct spi_nor *nor;
+ struct mtd_info *mtd;
+ int ret, i = 0;
+
+ q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
+ q->nor_num = of_get_child_count(dev->of_node);
+ if (!q->nor_num || q->nor_num > FSL_QSPI_MAX_CHIP)
+ return -ENODEV;
+
+ q->dev = dev;
+ q->devtype_data = of_device_get_match_data(dev);
+ if (!q->devtype_data)
+ return -ENODEV;
+ platform_set_drvdata(pdev, q);
+
+ /* find the resources */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
+ q->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(q->iobase))
+ return PTR_ERR(q->iobase);
+
+ q->big_endian = of_property_read_bool(np, "big-endian");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "QuadSPI-memory");
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ res->name)) {
+ dev_err(dev, "can't request region for resource %pR\n", res);
+ return -EBUSY;
+ }
+
+ q->memmap_phy = res->start;
+
+ /* find the clocks */
+ q->clk_en = devm_clk_get(dev, "qspi_en");
+ if (IS_ERR(q->clk_en))
+ return PTR_ERR(q->clk_en);
+
+ q->clk = devm_clk_get(dev, "qspi");
+ if (IS_ERR(q->clk))
+ return PTR_ERR(q->clk);
+
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ goto clk_failed;
+ }
+
+ /* find the irq */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "failed to get the irq: %d\n", ret);
+ goto irq_failed;
+ }
+
+ ret = devm_request_irq(dev, ret,
+ fsl_qspi_irq_handler, 0, pdev->name, q);
+ if (ret) {
+ dev_err(dev, "failed to request irq: %d\n", ret);
+ goto irq_failed;
+ }
+
+ ret = fsl_qspi_nor_setup(q);
+ if (ret)
+ goto irq_failed;
+
+ if (of_get_property(np, "fsl,qspi-has-second-chip", NULL))
+ q->has_second_chip = true;
+
+ mutex_init(&q->lock);
+
+ /* iterate the subnodes. */
+ for_each_available_child_of_node(dev->of_node, np) {
+ /*
+ * Skip the holes: without a second chip on each bus only
+ * every other chip select is populated.
+ */
+ if (!q->has_second_chip)
+ i *= 2;
+
+ nor = &q->nor[i];
+ mtd = &nor->mtd;
+
+ nor->dev = dev;
+ spi_nor_set_flash_node(nor, np);
+ nor->priv = q;
+
+ if (q->nor_num > 1 && !mtd->name) {
+ u32 spiflash_idx;
+
+ ret = of_property_read_u32(np, "reg", &spiflash_idx);
+ if (!ret) {
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-%d",
+ dev_name(dev),
+ spiflash_idx);
+ if (!mtd->name) {
+ ret = -ENOMEM;
+ goto mutex_failed;
+ }
+ } else {
+ dev_warn(dev, "reg property is missing\n");
+ }
+ }
+
+ /* fill the hooks */
+ nor->read_reg = fsl_qspi_read_reg;
+ nor->write_reg = fsl_qspi_write_reg;
+ nor->read = fsl_qspi_read;
+ nor->write = fsl_qspi_write;
+ nor->erase = fsl_qspi_erase;
+
+ nor->prepare = fsl_qspi_prep;
+ nor->unprepare = fsl_qspi_unprep;
+
+ ret = of_property_read_u32(np, "spi-max-frequency",
+ &q->clk_rate);
+ if (ret < 0)
+ goto mutex_failed;
+
+ /* set the chip address for READID */
+ fsl_qspi_set_base_addr(q, nor);
+
+ ret = spi_nor_scan(nor, NULL, &hwcaps);
+ if (ret)
+ goto mutex_failed;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto mutex_failed;
+
+ /* Set the correct NOR size now. */
+ if (q->nor_size == 0) {
+ q->nor_size = mtd->size;
+
+ /* Map the SPI NOR to an accessible address */
+ fsl_qspi_set_map_addr(q);
+ }
+
+ /*
+ * The TX FIFO is 64 bytes in the Vybrid, but a Page Program may
+ * write up to 256 bytes at a time. The write works in units of
+ * the TX FIFO, not in units of the SPI NOR's page size.
+ *
+ * So shrink spi_nor->page_size if it is larger than the
+ * TX FIFO.
+ */
+ if (nor->page_size > q->devtype_data->txfifo)
+ nor->page_size = q->devtype_data->txfifo;
+
+ i++;
+ }
+
+ /* finish the rest of the init. */
+ ret = fsl_qspi_nor_setup_last(q);
+ if (ret)
+ goto last_init_failed;
+
+ fsl_qspi_clk_disable_unprep(q);
+ return 0;
+
+last_init_failed:
+ for (i = 0; i < q->nor_num; i++) {
+ /* skip the holes */
+ if (!q->has_second_chip)
+ i *= 2;
+ mtd_device_unregister(&q->nor[i].mtd);
+ }
+mutex_failed:
+ mutex_destroy(&q->lock);
+irq_failed:
+ fsl_qspi_clk_disable_unprep(q);
+clk_failed:
+ dev_err(dev, "Freescale QuadSPI probe failed\n");
+ return ret;
+}
+
+static int fsl_qspi_remove(struct platform_device *pdev)
+{
+ struct fsl_qspi *q = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < q->nor_num; i++) {
+ /* skip the holes */
+ if (!q->has_second_chip)
+ i *= 2;
+ mtd_device_unregister(&q->nor[i].mtd);
+ }
+
+ /* disable the hardware */
+ qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
+ qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
+
+ mutex_destroy(&q->lock);
+
+ if (q->ahb_addr)
+ iounmap(q->ahb_addr);
+
+ return 0;
+}
+
+static int fsl_qspi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+static int fsl_qspi_resume(struct platform_device *pdev)
+{
+ int ret;
+ struct fsl_qspi *q = platform_get_drvdata(pdev);
+
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ return ret;
+
+ fsl_qspi_nor_setup(q);
+ fsl_qspi_set_map_addr(q);
+ fsl_qspi_nor_setup_last(q);
+
+ fsl_qspi_clk_disable_unprep(q);
+
+ return 0;
+}
+
+static struct platform_driver fsl_qspi_driver = {
+ .driver = {
+ .name = "fsl-quadspi",
+ .of_match_table = fsl_qspi_dt_ids,
+ },
+ .probe = fsl_qspi_probe,
+ .remove = fsl_qspi_remove,
+ .suspend = fsl_qspi_suspend,
+ .resume = fsl_qspi_resume,
+};
+module_platform_driver(fsl_qspi_driver);
+
+MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver");
+MODULE_AUTHOR("Freescale Semiconductor Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c
new file mode 100644
index 000000000..36d2eb091
--- /dev/null
+++ b/drivers/mtd/spi-nor/hisi-sfc.c
@@ -0,0 +1,508 @@
+/*
+ * HiSilicon SPI Nor Flash Controller Driver
+ *
+ * Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Hardware register offsets and field definitions */
+#define FMC_CFG 0x00
+#define FMC_CFG_OP_MODE_MASK BIT_MASK(0)
+#define FMC_CFG_OP_MODE_BOOT 0
+#define FMC_CFG_OP_MODE_NORMAL 1
+#define FMC_CFG_FLASH_SEL(type) (((type) & 0x3) << 1)
+#define FMC_CFG_FLASH_SEL_MASK 0x6
+#define FMC_ECC_TYPE(type) (((type) & 0x7) << 5)
+#define FMC_ECC_TYPE_MASK GENMASK(7, 5)
+#define SPI_NOR_ADDR_MODE_MASK BIT_MASK(10)
+#define SPI_NOR_ADDR_MODE_3BYTES (0x0 << 10)
+#define SPI_NOR_ADDR_MODE_4BYTES (0x1 << 10)
+#define FMC_GLOBAL_CFG 0x04
+#define FMC_GLOBAL_CFG_WP_ENABLE BIT(6)
+#define FMC_SPI_TIMING_CFG 0x08
+#define TIMING_CFG_TCSH(nr) (((nr) & 0xf) << 8)
+#define TIMING_CFG_TCSS(nr) (((nr) & 0xf) << 4)
+#define TIMING_CFG_TSHSL(nr) ((nr) & 0xf)
+#define CS_HOLD_TIME 0x6
+#define CS_SETUP_TIME 0x6
+#define CS_DESELECT_TIME 0xf
+#define FMC_INT 0x18
+#define FMC_INT_OP_DONE BIT(0)
+#define FMC_INT_CLR 0x20
+#define FMC_CMD 0x24
+#define FMC_CMD_CMD1(cmd) ((cmd) & 0xff)
+#define FMC_ADDRL 0x2c
+#define FMC_OP_CFG 0x30
+#define OP_CFG_FM_CS(cs) ((cs) << 11)
+#define OP_CFG_MEM_IF_TYPE(type) (((type) & 0x7) << 7)
+#define OP_CFG_ADDR_NUM(addr) (((addr) & 0x7) << 4)
+#define OP_CFG_DUMMY_NUM(dummy) ((dummy) & 0xf)
+#define FMC_DATA_NUM 0x38
+#define FMC_DATA_NUM_CNT(cnt) ((cnt) & GENMASK(13, 0))
+#define FMC_OP 0x3c
+#define FMC_OP_DUMMY_EN BIT(8)
+#define FMC_OP_CMD1_EN BIT(7)
+#define FMC_OP_ADDR_EN BIT(6)
+#define FMC_OP_WRITE_DATA_EN BIT(5)
+#define FMC_OP_READ_DATA_EN BIT(2)
+#define FMC_OP_READ_STATUS_EN BIT(1)
+#define FMC_OP_REG_OP_START BIT(0)
+#define FMC_DMA_LEN 0x40
+#define FMC_DMA_LEN_SET(len) ((len) & GENMASK(27, 0))
+#define FMC_DMA_SADDR_D0 0x4c
+#define HIFMC_DMA_MAX_LEN (4096)
+#define HIFMC_DMA_MASK (HIFMC_DMA_MAX_LEN - 1)
+#define FMC_OP_DMA 0x68
+#define OP_CTRL_RD_OPCODE(code) (((code) & 0xff) << 16)
+#define OP_CTRL_WR_OPCODE(code) (((code) & 0xff) << 8)
+#define OP_CTRL_RW_OP(op) ((op) << 1)
+#define OP_CTRL_DMA_OP_READY BIT(0)
+#define FMC_OP_READ 0x0
+#define FMC_OP_WRITE 0x1
+#define FMC_WAIT_TIMEOUT 1000000
+
+enum hifmc_iftype {
+ IF_TYPE_STD,
+ IF_TYPE_DUAL,
+ IF_TYPE_DIO,
+ IF_TYPE_QUAD,
+ IF_TYPE_QIO,
+};
+
+struct hifmc_priv {
+ u32 chipselect;
+ u32 clkrate;
+ struct hifmc_host *host;
+};
+
+#define HIFMC_MAX_CHIP_NUM 2
+struct hifmc_host {
+ struct device *dev;
+ struct mutex lock;
+
+ void __iomem *regbase;
+ void __iomem *iobase;
+ struct clk *clk;
+ void *buffer;
+ dma_addr_t dma_buffer;
+
+ struct spi_nor *nor[HIFMC_MAX_CHIP_NUM];
+ u32 num_chip;
+};
+
+static inline int hisi_spi_nor_wait_op_finish(struct hifmc_host *host)
+{
+ u32 reg;
+
+ return readl_poll_timeout(host->regbase + FMC_INT, reg,
+ (reg & FMC_INT_OP_DONE), 0, FMC_WAIT_TIMEOUT);
+}
+
+static int hisi_spi_nor_get_if_type(enum spi_nor_protocol proto)
+{
+ enum hifmc_iftype if_type;
+
+ switch (proto) {
+ case SNOR_PROTO_1_1_2:
+ if_type = IF_TYPE_DUAL;
+ break;
+ case SNOR_PROTO_1_2_2:
+ if_type = IF_TYPE_DIO;
+ break;
+ case SNOR_PROTO_1_1_4:
+ if_type = IF_TYPE_QUAD;
+ break;
+ case SNOR_PROTO_1_4_4:
+ if_type = IF_TYPE_QIO;
+ break;
+ case SNOR_PROTO_1_1_1:
+ default:
+ if_type = IF_TYPE_STD;
+ break;
+ }
+
+ return if_type;
+}
+
+static void hisi_spi_nor_init(struct hifmc_host *host)
+{
+ u32 reg;
+
+ reg = TIMING_CFG_TCSH(CS_HOLD_TIME)
+ | TIMING_CFG_TCSS(CS_SETUP_TIME)
+ | TIMING_CFG_TSHSL(CS_DESELECT_TIME);
+ writel(reg, host->regbase + FMC_SPI_TIMING_CFG);
+}
+
+static int hisi_spi_nor_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ int ret;
+
+ mutex_lock(&host->lock);
+
+ ret = clk_set_rate(host->clk, priv->clkrate);
+ if (ret)
+ goto out;
+
+ ret = clk_prepare_enable(host->clk);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ mutex_unlock(&host->lock);
+ return ret;
+}
+
+static void hisi_spi_nor_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+
+ clk_disable_unprepare(host->clk);
+ mutex_unlock(&host->lock);
+}
+
+static int hisi_spi_nor_op_reg(struct spi_nor *nor,
+ u8 opcode, int len, u8 optype)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ u32 reg;
+
+ reg = FMC_CMD_CMD1(opcode);
+ writel(reg, host->regbase + FMC_CMD);
+
+ reg = FMC_DATA_NUM_CNT(len);
+ writel(reg, host->regbase + FMC_DATA_NUM);
+
+ reg = OP_CFG_FM_CS(priv->chipselect);
+ writel(reg, host->regbase + FMC_OP_CFG);
+
+ writel(0xff, host->regbase + FMC_INT_CLR);
+ reg = FMC_OP_CMD1_EN | FMC_OP_REG_OP_START | optype;
+ writel(reg, host->regbase + FMC_OP);
+
+ return hisi_spi_nor_wait_op_finish(host);
+}
+
+static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ int len)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ int ret;
+
+ ret = hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_READ_DATA_EN);
+ if (ret)
+ return ret;
+
+ memcpy_fromio(buf, host->iobase, len);
+ return 0;
+}
+
+static int hisi_spi_nor_write_reg(struct spi_nor *nor, u8 opcode,
+ u8 *buf, int len)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+
+ if (len)
+ memcpy_toio(host->iobase, buf, len);
+
+ return hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_WRITE_DATA_EN);
+}
+
+static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off,
+ dma_addr_t dma_buf, size_t len, u8 op_type)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ u8 if_type = 0;
+ u32 reg;
+
+ reg = readl(host->regbase + FMC_CFG);
+ reg &= ~(FMC_CFG_OP_MODE_MASK | SPI_NOR_ADDR_MODE_MASK);
+ reg |= FMC_CFG_OP_MODE_NORMAL;
+ reg |= (nor->addr_width == 4) ? SPI_NOR_ADDR_MODE_4BYTES
+ : SPI_NOR_ADDR_MODE_3BYTES;
+ writel(reg, host->regbase + FMC_CFG);
+
+ writel(start_off, host->regbase + FMC_ADDRL);
+ writel(dma_buf, host->regbase + FMC_DMA_SADDR_D0);
+ writel(FMC_DMA_LEN_SET(len), host->regbase + FMC_DMA_LEN);
+
+ reg = OP_CFG_FM_CS(priv->chipselect);
+ if (op_type == FMC_OP_READ)
+ if_type = hisi_spi_nor_get_if_type(nor->read_proto);
+ else
+ if_type = hisi_spi_nor_get_if_type(nor->write_proto);
+ reg |= OP_CFG_MEM_IF_TYPE(if_type);
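+	/* convert nor->read_dummy (clock cycles) to bytes for OP_CFG_DUMMY_NUM */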
+ if (op_type == FMC_OP_READ)
+ reg |= OP_CFG_DUMMY_NUM(nor->read_dummy >> 3);
+ writel(reg, host->regbase + FMC_OP_CFG);
+
+ writel(0xff, host->regbase + FMC_INT_CLR);
+ reg = OP_CTRL_RW_OP(op_type) | OP_CTRL_DMA_OP_READY;
+ reg |= (op_type == FMC_OP_READ)
+ ? OP_CTRL_RD_OPCODE(nor->read_opcode)
+ : OP_CTRL_WR_OPCODE(nor->program_opcode);
+ writel(reg, host->regbase + FMC_OP_DMA);
+
+ return hisi_spi_nor_wait_op_finish(host);
+}
+
+static ssize_t hisi_spi_nor_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *read_buf)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ size_t offset;
+ int ret;
+
+ for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) {
+ size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset);
+
+ ret = hisi_spi_nor_dma_transfer(nor,
+ from + offset, host->dma_buffer, trans, FMC_OP_READ);
+ if (ret) {
+ dev_warn(nor->dev, "DMA read timeout\n");
+ return ret;
+ }
+ memcpy(read_buf + offset, host->buffer, trans);
+ }
+
+ return len;
+}
+
+static ssize_t hisi_spi_nor_write(struct spi_nor *nor, loff_t to,
+ size_t len, const u_char *write_buf)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ size_t offset;
+ int ret;
+
+ for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) {
+ size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset);
+
+ memcpy(host->buffer, write_buf + offset, trans);
+ ret = hisi_spi_nor_dma_transfer(nor,
+ to + offset, host->dma_buffer, trans, FMC_OP_WRITE);
+ if (ret) {
+ dev_warn(nor->dev, "DMA write timeout\n");
+ return ret;
+ }
+ }
+
+ return len;
+}
+
+/*
+ * Get the SPI flash device information and register it as an MTD device.
+ */
+static int hisi_spi_nor_register(struct device_node *np,
+ struct hifmc_host *host)
+{
+ const struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_READ_1_1_2 |
+ SNOR_HWCAPS_READ_1_1_4 |
+ SNOR_HWCAPS_PP,
+ };
+ struct device *dev = host->dev;
+ struct spi_nor *nor;
+ struct hifmc_priv *priv;
+ struct mtd_info *mtd;
+ int ret;
+
+ nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL);
+ if (!nor)
+ return -ENOMEM;
+
+ nor->dev = dev;
+ spi_nor_set_flash_node(nor, np);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(np, "reg", &priv->chipselect);
+ if (ret) {
+ dev_err(dev, "There's no reg property for %pOF\n",
+ np);
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "spi-max-frequency",
+ &priv->clkrate);
+ if (ret) {
+ dev_err(dev, "There's no spi-max-frequency property for %pOF\n",
+ np);
+ return ret;
+ }
+ priv->host = host;
+ nor->priv = priv;
+
+ nor->prepare = hisi_spi_nor_prep;
+ nor->unprepare = hisi_spi_nor_unprep;
+ nor->read_reg = hisi_spi_nor_read_reg;
+ nor->write_reg = hisi_spi_nor_write_reg;
+ nor->read = hisi_spi_nor_read;
+ nor->write = hisi_spi_nor_write;
+ nor->erase = NULL;
+ ret = spi_nor_scan(nor, NULL, &hwcaps);
+ if (ret)
+ return ret;
+
+ mtd = &nor->mtd;
+ mtd->name = np->name;
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ return ret;
+
+ host->nor[host->num_chip] = nor;
+ host->num_chip++;
+ return 0;
+}
+
+static void hisi_spi_nor_unregister_all(struct hifmc_host *host)
+{
+ int i;
+
+ for (i = 0; i < host->num_chip; i++)
+ mtd_device_unregister(&host->nor[i]->mtd);
+}
+
+static int hisi_spi_nor_register_all(struct hifmc_host *host)
+{
+ struct device *dev = host->dev;
+ struct device_node *np;
+ int ret;
+
+ for_each_available_child_of_node(dev->of_node, np) {
+ ret = hisi_spi_nor_register(np, host);
+ if (ret) {
+ of_node_put(np);
+ goto fail;
+ }
+
+ if (host->num_chip == HIFMC_MAX_CHIP_NUM) {
+ dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n");
+ break;
+ }
+ }
+
+ return 0;
+
+fail:
+ hisi_spi_nor_unregister_all(host);
+ return ret;
+}
+
+static int hisi_spi_nor_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct hifmc_host *host;
+ int ret;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, host);
+ host->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
+ host->regbase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(host->regbase))
+ return PTR_ERR(host->regbase);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "memory");
+ host->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(host->iobase))
+ return PTR_ERR(host->iobase);
+
+ host->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(host->clk))
+ return PTR_ERR(host->clk);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_warn(dev, "Unable to set dma mask\n");
+ return ret;
+ }
+
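+	/*
+	 * A single DMA bounce buffer is shared by all chips; the read and
+	 * write paths chunk transfers to HIFMC_DMA_MAX_LEN to fit it.
+	 */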
+ host->buffer = dmam_alloc_coherent(dev, HIFMC_DMA_MAX_LEN,
+ &host->dma_buffer, GFP_KERNEL);
+ if (!host->buffer)
+ return -ENOMEM;
+
+ ret = clk_prepare_enable(host->clk);
+ if (ret)
+ return ret;
+
+ mutex_init(&host->lock);
+ hisi_spi_nor_init(host);
+ ret = hisi_spi_nor_register_all(host);
+ if (ret)
+ mutex_destroy(&host->lock);
+
+ clk_disable_unprepare(host->clk);
+ return ret;
+}
+
+static int hisi_spi_nor_remove(struct platform_device *pdev)
+{
+ struct hifmc_host *host = platform_get_drvdata(pdev);
+
+ hisi_spi_nor_unregister_all(host);
+ mutex_destroy(&host->lock);
+ return 0;
+}
+
+static const struct of_device_id hisi_spi_nor_dt_ids[] = {
+ { .compatible = "hisilicon,fmc-spi-nor"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hisi_spi_nor_dt_ids);
+
+static struct platform_driver hisi_spi_nor_driver = {
+ .driver = {
+ .name = "hisi-sfc",
+ .of_match_table = hisi_spi_nor_dt_ids,
+ },
+ .probe = hisi_spi_nor_probe,
+ .remove = hisi_spi_nor_remove,
+};
+module_platform_driver(hisi_spi_nor_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("HiSilicon SPI Nor Flash Controller Driver");
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c
new file mode 100644
index 000000000..872b40922
--- /dev/null
+++ b/drivers/mtd/spi-nor/intel-spi-pci.c
@@ -0,0 +1,86 @@
+/*
+ * Intel PCH/PCU SPI flash PCI driver.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "intel-spi.h"
+
+#define BCR 0xdc
+#define BCR_WPD BIT(0)
+
+static const struct intel_spi_boardinfo bxt_info = {
+ .type = INTEL_SPI_BXT,
+};
+
+static int intel_spi_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct intel_spi_boardinfo *info;
+ struct intel_spi *ispi;
+ u32 bcr;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ /* Try to make the chip read/write */
+ pci_read_config_dword(pdev, BCR, &bcr);
+ if (!(bcr & BCR_WPD)) {
+ bcr |= BCR_WPD;
+ pci_write_config_dword(pdev, BCR, bcr);
+ pci_read_config_dword(pdev, BCR, &bcr);
+ }
+ info->writeable = !!(bcr & BCR_WPD);
+
+ ispi = intel_spi_probe(&pdev->dev, &pdev->resource[0], info);
+ if (IS_ERR(ispi))
+ return PTR_ERR(ispi);
+
+ pci_set_drvdata(pdev, ispi);
+ return 0;
+}
+
+static void intel_spi_pci_remove(struct pci_dev *pdev)
+{
+ intel_spi_remove(pci_get_drvdata(pdev));
+}
+
+static const struct pci_device_id intel_spi_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
+ { },
+};
+MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
+
+static struct pci_driver intel_spi_pci_driver = {
+ .name = "intel-spi",
+ .id_table = intel_spi_pci_ids,
+ .probe = intel_spi_pci_probe,
+ .remove = intel_spi_pci_remove,
+};
+
+module_pci_driver(intel_spi_pci_driver);
+
+MODULE_DESCRIPTION("Intel PCH/PCU SPI flash PCI driver");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/intel-spi-platform.c b/drivers/mtd/spi-nor/intel-spi-platform.c
new file mode 100644
index 000000000..5c943df93
--- /dev/null
+++ b/drivers/mtd/spi-nor/intel-spi-platform.c
@@ -0,0 +1,57 @@
+/*
+ * Intel PCH/PCU SPI flash platform driver.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "intel-spi.h"
+
+static int intel_spi_platform_probe(struct platform_device *pdev)
+{
+ struct intel_spi_boardinfo *info;
+ struct intel_spi *ispi;
+ struct resource *mem;
+
+ info = dev_get_platdata(&pdev->dev);
+ if (!info)
+ return -EINVAL;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ispi = intel_spi_probe(&pdev->dev, mem, info);
+ if (IS_ERR(ispi))
+ return PTR_ERR(ispi);
+
+ platform_set_drvdata(pdev, ispi);
+ return 0;
+}
+
+static int intel_spi_platform_remove(struct platform_device *pdev)
+{
+ struct intel_spi *ispi = platform_get_drvdata(pdev);
+
+ return intel_spi_remove(ispi);
+}
+
+static struct platform_driver intel_spi_platform_driver = {
+ .probe = intel_spi_platform_probe,
+ .remove = intel_spi_platform_remove,
+ .driver = {
+ .name = "intel-spi",
+ },
+};
+
+module_platform_driver(intel_spi_platform_driver);
+
+MODULE_DESCRIPTION("Intel PCH/PCU SPI flash platform driver");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:intel-spi");
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
new file mode 100644
index 000000000..d60cbf23d
--- /dev/null
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -0,0 +1,935 @@
+/*
+ * Intel PCH/PCU SPI flash driver.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/platform_data/intel-spi.h>
+
+#include "intel-spi.h"
+
+/* Offsets are from @ispi->base */
+#define BFPREG 0x00
+
+#define HSFSTS_CTL 0x04
+#define HSFSTS_CTL_FSMIE BIT(31)
+#define HSFSTS_CTL_FDBC_SHIFT 24
+#define HSFSTS_CTL_FDBC_MASK (0x3f << HSFSTS_CTL_FDBC_SHIFT)
+
+#define HSFSTS_CTL_FCYCLE_SHIFT 17
+#define HSFSTS_CTL_FCYCLE_MASK (0x0f << HSFSTS_CTL_FCYCLE_SHIFT)
+/* HW sequencer opcodes */
+#define HSFSTS_CTL_FCYCLE_READ (0x00 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_WRITE (0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_ERASE (0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_ERASE_64K (0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_RDID (0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_WRSR (0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_RDSR (0x08 << HSFSTS_CTL_FCYCLE_SHIFT)
+
+#define HSFSTS_CTL_FGO BIT(16)
+#define HSFSTS_CTL_FLOCKDN BIT(15)
+#define HSFSTS_CTL_FDV BIT(14)
+#define HSFSTS_CTL_SCIP BIT(5)
+#define HSFSTS_CTL_AEL BIT(2)
+#define HSFSTS_CTL_FCERR BIT(1)
+#define HSFSTS_CTL_FDONE BIT(0)
+
+#define FADDR 0x08
+#define DLOCK 0x0c
+#define FDATA(n) (0x10 + ((n) * 4))
+
+#define FRACC 0x50
+
+#define FREG(n) (0x54 + ((n) * 4))
+#define FREG_BASE_MASK 0x3fff
+#define FREG_LIMIT_SHIFT 16
+#define FREG_LIMIT_MASK (0x03fff << FREG_LIMIT_SHIFT)
+
+/* Offset is from @ispi->pregs */
+#define PR(n) ((n) * 4)
+#define PR_WPE BIT(31)
+#define PR_LIMIT_SHIFT 16
+#define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT)
+#define PR_RPE BIT(15)
+#define PR_BASE_MASK 0x3fff
+
+/* Offsets are from @ispi->sregs */
+#define SSFSTS_CTL 0x00
+#define SSFSTS_CTL_FSMIE BIT(23)
+#define SSFSTS_CTL_DS BIT(22)
+#define SSFSTS_CTL_DBC_SHIFT 16
+#define SSFSTS_CTL_SPOP BIT(11)
+#define SSFSTS_CTL_ACS BIT(10)
+#define SSFSTS_CTL_SCGO BIT(9)
+#define SSFSTS_CTL_COP_SHIFT 12
+#define SSFSTS_CTL_FRS BIT(7)
+#define SSFSTS_CTL_DOFRS BIT(6)
+#define SSFSTS_CTL_AEL BIT(4)
+#define SSFSTS_CTL_FCERR BIT(3)
+#define SSFSTS_CTL_FDONE BIT(2)
+#define SSFSTS_CTL_SCIP BIT(0)
+
+#define PREOP_OPTYPE 0x04
+#define OPMENU0 0x08
+#define OPMENU1 0x0c
+
+#define OPTYPE_READ_NO_ADDR 0
+#define OPTYPE_WRITE_NO_ADDR 1
+#define OPTYPE_READ_WITH_ADDR 2
+#define OPTYPE_WRITE_WITH_ADDR 3
+
+/* CPU specifics */
+#define BYT_PR 0x74
+#define BYT_SSFSTS_CTL 0x90
+#define BYT_BCR 0xfc
+#define BYT_BCR_WPD BIT(0)
+#define BYT_FREG_NUM 5
+#define BYT_PR_NUM 5
+
+#define LPT_PR 0x74
+#define LPT_SSFSTS_CTL 0x90
+#define LPT_FREG_NUM 5
+#define LPT_PR_NUM 5
+
+#define BXT_PR 0x84
+#define BXT_SSFSTS_CTL 0xa0
+#define BXT_FREG_NUM 12
+#define BXT_PR_NUM 6
+
+#define LVSCC 0xc4
+#define UVSCC 0xc8
+#define ERASE_OPCODE_SHIFT 8
+#define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
+#define ERASE_64K_OPCODE_SHIFT 16
+#define ERASE_64K_OPCODE_MASK (0xff << ERASE_64K_OPCODE_SHIFT)
+
+#define INTEL_SPI_TIMEOUT 5000 /* ms */
+#define INTEL_SPI_FIFO_SZ 64
+
+/**
+ * struct intel_spi - Driver private data
+ * @dev: Device pointer
+ * @info: Pointer to board specific info
+ * @nor: SPI NOR layer structure
+ * @base: Beginning of MMIO space
+ * @pregs: Start of protection registers
+ * @sregs: Start of software sequencer registers
+ * @nregions: Maximum number of regions
+ * @pr_num: Maximum number of protected range registers
+ * @writeable: Is the chip writeable
+ * @locked: Is SPI setting locked
+ * @swseq_reg: Use SW sequencer in register reads/writes
+ * @swseq_erase: Use SW sequencer in erase operation
+ * @erase_64k: 64k erase supported
+ * @atomic_preopcode: Holds preopcode when atomic sequence is requested
+ * @opcodes: Opcodes which are supported. These are programmed by the BIOS
+ * before it locks down the controller.
+ */
+struct intel_spi {
+ struct device *dev;
+ const struct intel_spi_boardinfo *info;
+ struct spi_nor nor;
+ void __iomem *base;
+ void __iomem *pregs;
+ void __iomem *sregs;
+ size_t nregions;
+ size_t pr_num;
+ bool writeable;
+ bool locked;
+ bool swseq_reg;
+ bool swseq_erase;
+ bool erase_64k;
+ u8 atomic_preopcode;
+ u8 opcodes[8];
+};
+
+static bool writeable;
+module_param(writeable, bool, 0);
+MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)");
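+/*
+ * Write support can then be enabled when the module is loaded, e.g.
+ * (illustrative):
+ *
+ *	modprobe intel-spi writeable=1
+ */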
+
+static void intel_spi_dump_regs(struct intel_spi *ispi)
+{
+ u32 value;
+ int i;
+
+ dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG));
+
+ value = readl(ispi->base + HSFSTS_CTL);
+ dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value);
+ if (value & HSFSTS_CTL_FLOCKDN)
+ dev_dbg(ispi->dev, "-> Locked\n");
+
+ dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR));
+ dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK));
+
+ for (i = 0; i < 16; i++)
+ dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n",
+ i, readl(ispi->base + FDATA(i)));
+
+ dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC));
+
+ for (i = 0; i < ispi->nregions; i++)
+ dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
+ readl(ispi->base + FREG(i)));
+ for (i = 0; i < ispi->pr_num; i++)
+ dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
+ readl(ispi->pregs + PR(i)));
+
+ value = readl(ispi->sregs + SSFSTS_CTL);
+ dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
+ dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
+ readl(ispi->sregs + PREOP_OPTYPE));
+ dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", readl(ispi->sregs + OPMENU0));
+ dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", readl(ispi->sregs + OPMENU1));
+
+ if (ispi->info->type == INTEL_SPI_BYT)
+ dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));
+
+ dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
+ dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));
+
+ dev_dbg(ispi->dev, "Protected regions:\n");
+ for (i = 0; i < ispi->pr_num; i++) {
+ u32 base, limit;
+
+ value = readl(ispi->pregs + PR(i));
+ if (!(value & (PR_WPE | PR_RPE)))
+ continue;
+
+ limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
+ base = value & PR_BASE_MASK;
+
+ dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
+ i, base << 12, (limit << 12) | 0xfff,
+ value & PR_WPE ? 'W' : '.',
+ value & PR_RPE ? 'R' : '.');
+ }
+
+ dev_dbg(ispi->dev, "Flash regions:\n");
+ for (i = 0; i < ispi->nregions; i++) {
+ u32 region, base, limit;
+
+ region = readl(ispi->base + FREG(i));
+ base = region & FREG_BASE_MASK;
+ limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;
+
+ if (base >= limit || (i > 0 && limit == 0))
+ dev_dbg(ispi->dev, " %02d disabled\n", i);
+ else
+ dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
+ i, base << 12, (limit << 12) | 0xfff);
+ }
+
+ dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
+ ispi->swseq_reg ? 'S' : 'H');
+ dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n",
+ ispi->swseq_erase ? 'S' : 'H');
+}
+
+/* Reads at most INTEL_SPI_FIFO_SZ bytes from the device FIFO */
+static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size)
+{
+ size_t bytes;
+ int i = 0;
+
+ if (size > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
+ while (size > 0) {
+ bytes = min_t(size_t, size, 4);
+ memcpy_fromio(buf, ispi->base + FDATA(i), bytes);
+ size -= bytes;
+ buf += bytes;
+ i++;
+ }
+
+ return 0;
+}
+
+/* Writes at most INTEL_SPI_FIFO_SZ bytes to the device FIFO */
+static int intel_spi_write_block(struct intel_spi *ispi, const void *buf,
+ size_t size)
+{
+ size_t bytes;
+ int i = 0;
+
+ if (size > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
+ while (size > 0) {
+ bytes = min_t(size_t, size, 4);
+ memcpy_toio(ispi->base + FDATA(i), buf, bytes);
+ size -= bytes;
+ buf += bytes;
+ i++;
+ }
+
+ return 0;
+}
+
+static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
+{
+ u32 val;
+
+ return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
+ !(val & HSFSTS_CTL_SCIP), 40,
+ INTEL_SPI_TIMEOUT * 1000);
+}
+
+static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
+{
+ u32 val;
+
+ return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
+ !(val & SSFSTS_CTL_SCIP), 40,
+ INTEL_SPI_TIMEOUT * 1000);
+}
+
+static int intel_spi_init(struct intel_spi *ispi)
+{
+ u32 opmenu0, opmenu1, lvscc, uvscc, val;
+ int i;
+
+ switch (ispi->info->type) {
+ case INTEL_SPI_BYT:
+ ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
+ ispi->pregs = ispi->base + BYT_PR;
+ ispi->nregions = BYT_FREG_NUM;
+ ispi->pr_num = BYT_PR_NUM;
+ ispi->swseq_reg = true;
+
+ if (writeable) {
+ /* Disable write protection */
+ val = readl(ispi->base + BYT_BCR);
+ if (!(val & BYT_BCR_WPD)) {
+ val |= BYT_BCR_WPD;
+ writel(val, ispi->base + BYT_BCR);
+ val = readl(ispi->base + BYT_BCR);
+ }
+
+ ispi->writeable = !!(val & BYT_BCR_WPD);
+ }
+
+ break;
+
+ case INTEL_SPI_LPT:
+ ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
+ ispi->pregs = ispi->base + LPT_PR;
+ ispi->nregions = LPT_FREG_NUM;
+ ispi->pr_num = LPT_PR_NUM;
+ ispi->swseq_reg = true;
+ break;
+
+ case INTEL_SPI_BXT:
+ ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
+ ispi->pregs = ispi->base + BXT_PR;
+ ispi->nregions = BXT_FREG_NUM;
+ ispi->pr_num = BXT_PR_NUM;
+ ispi->erase_64k = true;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Disable #SMI generation from HW sequencer */
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~HSFSTS_CTL_FSMIE;
+ writel(val, ispi->base + HSFSTS_CTL);
+
+ /*
+ * Determine whether erase operation should use HW or SW sequencer.
+ *
+ * The HW sequencer has a predefined list of opcodes, with only the
+ * erase opcode being programmable in LVSCC and UVSCC registers.
+ * If these registers don't contain a valid erase opcode, erase
+ * cannot be done using HW sequencer.
+ */
+ lvscc = readl(ispi->base + LVSCC);
+ uvscc = readl(ispi->base + UVSCC);
+ if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
+ ispi->swseq_erase = true;
+ /* SPI controller on Intel BXT supports 64K erase opcode */
+ if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
+ if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
+ !(uvscc & ERASE_64K_OPCODE_MASK))
+ ispi->erase_64k = false;
+
+ /*
+ * Some controllers can only do basic operations using hardware
+ * sequencer. All other operations are supposed to be carried out
+ * using software sequencer.
+ */
+ if (ispi->swseq_reg) {
+ /* Disable #SMI generation from SW sequencer */
+ val = readl(ispi->sregs + SSFSTS_CTL);
+ val &= ~SSFSTS_CTL_FSMIE;
+ writel(val, ispi->sregs + SSFSTS_CTL);
+ }
+
+ /* Check controller's lock status */
+ val = readl(ispi->base + HSFSTS_CTL);
+ ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
+
+ if (ispi->locked) {
+ /*
+ * BIOS programs allowed opcodes and then locks down the
+ * register. So read back what opcodes it decided to support.
+ * That's the set we are going to support as well.
+ */
+ opmenu0 = readl(ispi->sregs + OPMENU0);
+ opmenu1 = readl(ispi->sregs + OPMENU1);
+
+ if (opmenu0 && opmenu1) {
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
+ ispi->opcodes[i] = opmenu0 >> i * 8;
+ ispi->opcodes[i + 4] = opmenu1 >> i * 8;
+ }
+ }
+ }
+
+ intel_spi_dump_regs(ispi);
+
+ return 0;
+}
+
+static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
+{
+ int i;
+ int preop;
+
+ if (ispi->locked) {
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
+ if (ispi->opcodes[i] == opcode)
+ return i;
+
+ return -EINVAL;
+ }
+
+ /* The lock is off, so just use index 0 */
+ writel(opcode, ispi->sregs + OPMENU0);
+ preop = readw(ispi->sregs + PREOP_OPTYPE);
+ writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE);
+
+ return 0;
+}
+
+static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len)
+{
+ u32 val, status;
+ int ret;
+
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK);
+
+ switch (opcode) {
+ case SPINOR_OP_RDID:
+ val |= HSFSTS_CTL_FCYCLE_RDID;
+ break;
+ case SPINOR_OP_WRSR:
+ val |= HSFSTS_CTL_FCYCLE_WRSR;
+ break;
+ case SPINOR_OP_RDSR:
+ val |= HSFSTS_CTL_FCYCLE_RDSR;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (len > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
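+	/* FDBC holds the transferred byte count minus one */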
+ val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
+ val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
+ val |= HSFSTS_CTL_FGO;
+ writel(val, ispi->base + HSFSTS_CTL);
+
+ ret = intel_spi_wait_hw_busy(ispi);
+ if (ret)
+ return ret;
+
+ status = readl(ispi->base + HSFSTS_CTL);
+ if (status & HSFSTS_CTL_FCERR)
+ return -EIO;
+ else if (status & HSFSTS_CTL_AEL)
+ return -EACCES;
+
+ return 0;
+}
+
+static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
+ int optype)
+{
+ u32 val = 0, status;
+ u8 atomic_preopcode;
+ int ret;
+
+ ret = intel_spi_opcode_index(ispi, opcode, optype);
+ if (ret < 0)
+ return ret;
+
+ if (len > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
+ /*
+ * Always clear it after each SW sequencer operation regardless
+ * of whether it is successful or not.
+ */
+ atomic_preopcode = ispi->atomic_preopcode;
+ ispi->atomic_preopcode = 0;
+
+ /* Only set the 'Data Cycle' bit when there is data to be transferred */
+ if (len > 0)
+ val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
+ val |= ret << SSFSTS_CTL_COP_SHIFT;
+ val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
+ val |= SSFSTS_CTL_SCGO;
+ if (atomic_preopcode) {
+ u16 preop;
+
+ switch (optype) {
+ case OPTYPE_WRITE_NO_ADDR:
+ case OPTYPE_WRITE_WITH_ADDR:
+ /* Pick matching preopcode for the atomic sequence */
+ preop = readw(ispi->sregs + PREOP_OPTYPE);
+ if ((preop & 0xff) == atomic_preopcode)
+ ; /* Do nothing */
+ else if ((preop >> 8) == atomic_preopcode)
+ val |= SSFSTS_CTL_SPOP;
+ else
+ return -EINVAL;
+
+ /* Enable atomic sequence */
+ val |= SSFSTS_CTL_ACS;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ }
+ writel(val, ispi->sregs + SSFSTS_CTL);
+
+ ret = intel_spi_wait_sw_busy(ispi);
+ if (ret)
+ return ret;
+
+ status = readl(ispi->sregs + SSFSTS_CTL);
+ if (status & SSFSTS_CTL_FCERR)
+ return -EIO;
+ else if (status & SSFSTS_CTL_AEL)
+ return -EACCES;
+
+ return 0;
+}
+
+static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ struct intel_spi *ispi = nor->priv;
+ int ret;
+
+ /* Address of the first chip */
+ writel(0, ispi->base + FADDR);
+
+ if (ispi->swseq_reg)
+ ret = intel_spi_sw_cycle(ispi, opcode, len,
+ OPTYPE_READ_NO_ADDR);
+ else
+ ret = intel_spi_hw_cycle(ispi, opcode, len);
+
+ if (ret)
+ return ret;
+
+ return intel_spi_read_block(ispi, buf, len);
+}
+
+static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ struct intel_spi *ispi = nor->priv;
+ int ret;
+
+ /*
+ * This is handled with an atomic operation and preopcode in the
+ * Intel controller, so we only verify that it is available. If the
+ * controller is not locked, program the opcode into the PREOP
+ * register for later use.
+ *
+ * When the hardware sequencer is used, there is no need to program
+ * any opcodes (it handles them automatically as part of a command).
+ */
+ if (opcode == SPINOR_OP_WREN) {
+ u16 preop;
+
+ if (!ispi->swseq_reg)
+ return 0;
+
+ preop = readw(ispi->sregs + PREOP_OPTYPE);
+ if ((preop & 0xff) != opcode && (preop >> 8) != opcode) {
+ if (ispi->locked)
+ return -EINVAL;
+ writel(opcode, ispi->sregs + PREOP_OPTYPE);
+ }
+
+ /*
+ * This enables the atomic sequence on the next SW cycle. It will
+ * be cleared after the next operation.
+ */
+ ispi->atomic_preopcode = opcode;
+ return 0;
+ }
+
+ writel(0, ispi->base + FADDR);
+
+ /* Write the value beforehand */
+ ret = intel_spi_write_block(ispi, buf, len);
+ if (ret)
+ return ret;
+
+ if (ispi->swseq_reg)
+ return intel_spi_sw_cycle(ispi, opcode, len,
+ OPTYPE_WRITE_NO_ADDR);
+ return intel_spi_hw_cycle(ispi, opcode, len);
+}
+
+static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *read_buf)
+{
+ struct intel_spi *ispi = nor->priv;
+ size_t block_size, retlen = 0;
+ u32 val, status;
+ ssize_t ret;
+
+ /*
+ * Atomic sequence is not expected with HW sequencer reads. Make
+ * sure it is cleared regardless.
+ */
+ if (WARN_ON_ONCE(ispi->atomic_preopcode))
+ ispi->atomic_preopcode = 0;
+
+ switch (nor->read_opcode) {
+ case SPINOR_OP_READ:
+ case SPINOR_OP_READ_FAST:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ while (len > 0) {
+ block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+
+ /* Read cannot cross 4K boundary */
+ block_size = min_t(loff_t, from + block_size,
+ round_up(from + 1, SZ_4K)) - from;
+
+ writel(from, ispi->base + FADDR);
+
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
+ val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
+ val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
+ val |= HSFSTS_CTL_FCYCLE_READ;
+ val |= HSFSTS_CTL_FGO;
+ writel(val, ispi->base + HSFSTS_CTL);
+
+ ret = intel_spi_wait_hw_busy(ispi);
+ if (ret)
+ return ret;
+
+ status = readl(ispi->base + HSFSTS_CTL);
+ if (status & HSFSTS_CTL_FCERR)
+ ret = -EIO;
+ else if (status & HSFSTS_CTL_AEL)
+ ret = -EACCES;
+
+ if (ret < 0) {
+ dev_err(ispi->dev, "read error: %llx: %#x\n", from,
+ status);
+ return ret;
+ }
+
+ ret = intel_spi_read_block(ispi, read_buf, block_size);
+ if (ret)
+ return ret;
+
+ len -= block_size;
+ from += block_size;
+ retlen += block_size;
+ read_buf += block_size;
+ }
+
+ return retlen;
+}
+
+static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *write_buf)
+{
+ struct intel_spi *ispi = nor->priv;
+ size_t block_size, retlen = 0;
+ u32 val, status;
+ ssize_t ret;
+
+ /* Not needed with HW sequencer write, make sure it is cleared */
+ ispi->atomic_preopcode = 0;
+
+ while (len > 0) {
+ block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+
+ /* Write cannot cross 4K boundary */
+ block_size = min_t(loff_t, to + block_size,
+ round_up(to + 1, SZ_4K)) - to;
+
+ writel(to, ispi->base + FADDR);
+
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
+ val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
+ val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
+ val |= HSFSTS_CTL_FCYCLE_WRITE;
+
+ ret = intel_spi_write_block(ispi, write_buf, block_size);
+ if (ret) {
+ dev_err(ispi->dev, "failed to write block\n");
+ return ret;
+ }
+
+ /* Start the write now */
+ val |= HSFSTS_CTL_FGO;
+ writel(val, ispi->base + HSFSTS_CTL);
+
+ ret = intel_spi_wait_hw_busy(ispi);
+ if (ret) {
+ dev_err(ispi->dev, "timeout\n");
+ return ret;
+ }
+
+ status = readl(ispi->base + HSFSTS_CTL);
+ if (status & HSFSTS_CTL_FCERR)
+ ret = -EIO;
+ else if (status & HSFSTS_CTL_AEL)
+ ret = -EACCES;
+
+ if (ret < 0) {
+ dev_err(ispi->dev, "write error: %llx: %#x\n", to,
+ status);
+ return ret;
+ }
+
+ len -= block_size;
+ to += block_size;
+ retlen += block_size;
+ write_buf += block_size;
+ }
+
+ return retlen;
+}
+
+static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
+{
+ size_t erase_size, len = nor->mtd.erasesize;
+ struct intel_spi *ispi = nor->priv;
+ u32 val, status, cmd;
+ int ret;
+
+ /* If the hardware can do a 64k erase, use that when possible */
+ if (len >= SZ_64K && ispi->erase_64k) {
+ cmd = HSFSTS_CTL_FCYCLE_ERASE_64K;
+ erase_size = SZ_64K;
+ } else {
+ cmd = HSFSTS_CTL_FCYCLE_ERASE;
+ erase_size = SZ_4K;
+ }
+
+ if (ispi->swseq_erase) {
+ while (len > 0) {
+ writel(offs, ispi->base + FADDR);
+
+ ret = intel_spi_sw_cycle(ispi, nor->erase_opcode,
+ 0, OPTYPE_WRITE_WITH_ADDR);
+ if (ret)
+ return ret;
+
+ offs += erase_size;
+ len -= erase_size;
+ }
+
+ return 0;
+ }
+
+ /* Not needed with HW sequencer erase, make sure it is cleared */
+ ispi->atomic_preopcode = 0;
+
+ while (len > 0) {
+ writel(offs, ispi->base + FADDR);
+
+ val = readl(ispi->base + HSFSTS_CTL);
+ val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
+ val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
+ val |= cmd;
+ val |= HSFSTS_CTL_FGO;
+ writel(val, ispi->base + HSFSTS_CTL);
+
+ ret = intel_spi_wait_hw_busy(ispi);
+ if (ret)
+ return ret;
+
+ status = readl(ispi->base + HSFSTS_CTL);
+ if (status & HSFSTS_CTL_FCERR)
+ return -EIO;
+ else if (status & HSFSTS_CTL_AEL)
+ return -EACCES;
+
+ offs += erase_size;
+ len -= erase_size;
+ }
+
+ return 0;
+}
+
+static bool intel_spi_is_protected(const struct intel_spi *ispi,
+ unsigned int base, unsigned int limit)
+{
+ int i;
+
+ for (i = 0; i < ispi->pr_num; i++) {
+ u32 pr_base, pr_limit, pr_value;
+
+ pr_value = readl(ispi->pregs + PR(i));
+ if (!(pr_value & (PR_WPE | PR_RPE)))
+ continue;
+
+ pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
+ pr_base = pr_value & PR_BASE_MASK;
+
+ if (pr_base >= base && pr_limit <= limit)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * There will be a single partition holding all enabled flash regions. We
+ * call this "BIOS".
+ */
+static void intel_spi_fill_partition(struct intel_spi *ispi,
+ struct mtd_partition *part)
+{
+ u64 end;
+ int i;
+
+ memset(part, 0, sizeof(*part));
+
+ /* Start from the mandatory descriptor region */
+ part->size = 4096;
+ part->name = "BIOS";
+
+ /*
+ * Now try to find where this partition ends based on the flash
+ * region registers.
+ */
+ for (i = 1; i < ispi->nregions; i++) {
+ u32 region, base, limit;
+
+ region = readl(ispi->base + FREG(i));
+ base = region & FREG_BASE_MASK;
+ limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;
+
+ if (base >= limit || limit == 0)
+ continue;
+
+ /*
+ * If any of the regions have protection bits set, make the
+ * whole partition read-only to be on the safe side.
+ */
+ if (intel_spi_is_protected(ispi, base, limit))
+ ispi->writeable = false;
+
+ end = (limit << 12) + 4096;
+ if (end > part->size)
+ part->size = end;
+ }
+}
+
+struct intel_spi *intel_spi_probe(struct device *dev,
+ struct resource *mem, const struct intel_spi_boardinfo *info)
+{
+ const struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_PP,
+ };
+ struct mtd_partition part;
+ struct intel_spi *ispi;
+ int ret;
+
+ if (!info || !mem)
+ return ERR_PTR(-EINVAL);
+
+ ispi = devm_kzalloc(dev, sizeof(*ispi), GFP_KERNEL);
+ if (!ispi)
+ return ERR_PTR(-ENOMEM);
+
+ ispi->base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(ispi->base))
+ return ERR_CAST(ispi->base);
+
+ ispi->dev = dev;
+ ispi->info = info;
+ ispi->writeable = info->writeable;
+
+ ret = intel_spi_init(ispi);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ispi->nor.dev = ispi->dev;
+ ispi->nor.priv = ispi;
+ ispi->nor.read_reg = intel_spi_read_reg;
+ ispi->nor.write_reg = intel_spi_write_reg;
+ ispi->nor.read = intel_spi_read;
+ ispi->nor.write = intel_spi_write;
+ ispi->nor.erase = intel_spi_erase;
+
+ ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps);
+ if (ret) {
+ dev_info(dev, "failed to locate the chip\n");
+ return ERR_PTR(ret);
+ }
+
+ intel_spi_fill_partition(ispi, &part);
+
+ /* Prevent writes if not explicitly enabled */
+ if (!ispi->writeable || !writeable)
+ ispi->nor.mtd.flags &= ~MTD_WRITEABLE;
+
+ ret = mtd_device_register(&ispi->nor.mtd, &part, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return ispi;
+}
+EXPORT_SYMBOL_GPL(intel_spi_probe);
+
+int intel_spi_remove(struct intel_spi *ispi)
+{
+ return mtd_device_unregister(&ispi->nor.mtd);
+}
+EXPORT_SYMBOL_GPL(intel_spi_remove);
+
+MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/intel-spi.h b/drivers/mtd/spi-nor/intel-spi.h
new file mode 100644
index 000000000..5ab7dc250
--- /dev/null
+++ b/drivers/mtd/spi-nor/intel-spi.h
@@ -0,0 +1,24 @@
+/*
+ * Intel PCH/PCU SPI flash driver.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef INTEL_SPI_H
+#define INTEL_SPI_H
+
+#include <linux/platform_data/intel-spi.h>
+
+struct intel_spi;
+struct resource;
+
+struct intel_spi *intel_spi_probe(struct device *dev,
+ struct resource *mem, const struct intel_spi_boardinfo *info);
+int intel_spi_remove(struct intel_spi *ispi);
+
+#endif /* INTEL_SPI_H */
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
new file mode 100644
index 000000000..5442993b7
--- /dev/null
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -0,0 +1,569 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Bayi Cheng <bayi.cheng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+
+#define MTK_NOR_CMD_REG 0x00
+#define MTK_NOR_CNT_REG 0x04
+#define MTK_NOR_RDSR_REG 0x08
+#define MTK_NOR_RDATA_REG 0x0c
+#define MTK_NOR_RADR0_REG 0x10
+#define MTK_NOR_RADR1_REG 0x14
+#define MTK_NOR_RADR2_REG 0x18
+#define MTK_NOR_WDATA_REG 0x1c
+#define MTK_NOR_PRGDATA0_REG 0x20
+#define MTK_NOR_PRGDATA1_REG 0x24
+#define MTK_NOR_PRGDATA2_REG 0x28
+#define MTK_NOR_PRGDATA3_REG 0x2c
+#define MTK_NOR_PRGDATA4_REG 0x30
+#define MTK_NOR_PRGDATA5_REG 0x34
+#define MTK_NOR_SHREG0_REG 0x38
+#define MTK_NOR_SHREG1_REG 0x3c
+#define MTK_NOR_SHREG2_REG 0x40
+#define MTK_NOR_SHREG3_REG 0x44
+#define MTK_NOR_SHREG4_REG 0x48
+#define MTK_NOR_SHREG5_REG 0x4c
+#define MTK_NOR_SHREG6_REG 0x50
+#define MTK_NOR_SHREG7_REG 0x54
+#define MTK_NOR_SHREG8_REG 0x58
+#define MTK_NOR_SHREG9_REG 0x5c
+#define MTK_NOR_CFG1_REG 0x60
+#define MTK_NOR_CFG2_REG 0x64
+#define MTK_NOR_CFG3_REG 0x68
+#define MTK_NOR_STATUS0_REG 0x70
+#define MTK_NOR_STATUS1_REG 0x74
+#define MTK_NOR_STATUS2_REG 0x78
+#define MTK_NOR_STATUS3_REG 0x7c
+#define MTK_NOR_FLHCFG_REG 0x84
+#define MTK_NOR_TIME_REG 0x94
+#define MTK_NOR_PP_DATA_REG 0x98
+#define MTK_NOR_PREBUF_STUS_REG 0x9c
+#define MTK_NOR_DELSEL0_REG 0xa0
+#define MTK_NOR_DELSEL1_REG 0xa4
+#define MTK_NOR_INTRSTUS_REG 0xa8
+#define MTK_NOR_INTREN_REG 0xac
+#define MTK_NOR_CHKSUM_CTL_REG 0xb8
+#define MTK_NOR_CHKSUM_REG 0xbc
+#define MTK_NOR_CMD2_REG 0xc0
+#define MTK_NOR_WRPROT_REG 0xc4
+#define MTK_NOR_RADR3_REG 0xc8
+#define MTK_NOR_DUAL_REG 0xcc
+#define MTK_NOR_DELSEL2_REG 0xd0
+#define MTK_NOR_DELSEL3_REG 0xd4
+#define MTK_NOR_DELSEL4_REG 0xd8
+
+/* commands for mtk nor controller */
+#define MTK_NOR_READ_CMD 0x0
+#define MTK_NOR_RDSR_CMD 0x2
+#define MTK_NOR_PRG_CMD 0x4
+#define MTK_NOR_WR_CMD 0x10
+#define MTK_NOR_PIO_WR_CMD 0x90
+#define MTK_NOR_WRSR_CMD 0x20
+#define MTK_NOR_PIO_READ_CMD 0x81
+#define MTK_NOR_WR_BUF_ENABLE 0x1
+#define MTK_NOR_WR_BUF_DISABLE 0x0
+#define MTK_NOR_ENABLE_SF_CMD 0x30
+#define MTK_NOR_DUAD_ADDR_EN 0x8
+#define MTK_NOR_QUAD_READ_EN 0x4
+#define MTK_NOR_DUAL_ADDR_EN 0x2
+#define MTK_NOR_DUAL_READ_EN 0x1
+#define MTK_NOR_DUAL_DISABLE 0x0
+#define MTK_NOR_FAST_READ 0x1
+
+#define SFLASH_WRBUF_SIZE 128
+
+/* Can shift up to 48 bits (6 bytes) of TX/RX */
+#define MTK_NOR_MAX_RX_TX_SHIFT 6
+/* Can shift up to 56 bits (7 bytes) per transfer using MTK_NOR_PRG_CMD */
+#define MTK_NOR_MAX_SHIFT 7
+/* nor controller 4-byte address mode enable bit */
+#define MTK_NOR_4B_ADDR_EN BIT(4)
+
+/* Helpers for accessing the program data / shift data registers */
+#define MTK_NOR_PRG_REG(n) (MTK_NOR_PRGDATA0_REG + 4 * (n))
+#define MTK_NOR_SHREG(n) (MTK_NOR_SHREG0_REG + 4 * (n))
+
+struct mtk_nor {
+ struct spi_nor nor;
+ struct device *dev;
+ void __iomem *base; /* nor flash base address */
+ struct clk *spi_clk;
+ struct clk *nor_clk;
+};
+
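+/*
+ * Program the controller for the read protocol negotiated by the SPI NOR
+ * core: load the read opcode and enable fast, dual or quad mode as needed.
+ */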
+static void mtk_nor_set_read_mode(struct mtk_nor *mtk_nor)
+{
+ struct spi_nor *nor = &mtk_nor->nor;
+
+ switch (nor->read_proto) {
+ case SNOR_PROTO_1_1_1:
+ writeb(nor->read_opcode, mtk_nor->base +
+ MTK_NOR_PRGDATA3_REG);
+ writeb(MTK_NOR_FAST_READ, mtk_nor->base +
+ MTK_NOR_CFG1_REG);
+ break;
+ case SNOR_PROTO_1_1_2:
+ writeb(nor->read_opcode, mtk_nor->base +
+ MTK_NOR_PRGDATA3_REG);
+ writeb(MTK_NOR_DUAL_READ_EN, mtk_nor->base +
+ MTK_NOR_DUAL_REG);
+ break;
+ case SNOR_PROTO_1_1_4:
+ writeb(nor->read_opcode, mtk_nor->base +
+ MTK_NOR_PRGDATA4_REG);
+ writeb(MTK_NOR_QUAD_READ_EN, mtk_nor->base +
+ MTK_NOR_DUAL_REG);
+ break;
+ default:
+ writeb(MTK_NOR_DUAL_DISABLE, mtk_nor->base +
+ MTK_NOR_DUAL_REG);
+ break;
+ }
+}
+
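+/*
+ * Issue a command by writing it to the CMD register, then poll until the
+ * controller clears the command bits (or give up after 10 ms).
+ */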
+static int mtk_nor_execute_cmd(struct mtk_nor *mtk_nor, u8 cmdval)
+{
+ int reg;
+ u8 val = cmdval & 0x1f;
+
+ writeb(cmdval, mtk_nor->base + MTK_NOR_CMD_REG);
+ return readl_poll_timeout(mtk_nor->base + MTK_NOR_CMD_REG, reg,
+ !(reg & val), 100, 10000);
+}
+
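+/*
+ * Generic shift-register transfer: shift the opcode out through PRGDATA5,
+ * followed by the TX bytes in descending PRGDATA registers, then read the
+ * RX bytes back from the shift registers. Opcode, TX and RX combined may
+ * not exceed 7 bytes (56 bits).
+ */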
+static int mtk_nor_do_tx_rx(struct mtk_nor *mtk_nor, u8 op,
+ u8 *tx, int txlen, u8 *rx, int rxlen)
+{
+ int len = 1 + txlen + rxlen;
+ int i, ret, idx;
+
+ if (len > MTK_NOR_MAX_SHIFT)
+ return -EINVAL;
+
+ writeb(len * 8, mtk_nor->base + MTK_NOR_CNT_REG);
+
+ /* start at PRGDATA5, go down to PRGDATA0 */
+ idx = MTK_NOR_MAX_RX_TX_SHIFT - 1;
+
+ /* opcode */
+ writeb(op, mtk_nor->base + MTK_NOR_PRG_REG(idx));
+ idx--;
+
+ /* program TX data */
+ for (i = 0; i < txlen; i++, idx--)
+ writeb(tx[i], mtk_nor->base + MTK_NOR_PRG_REG(idx));
+
+ /* clear out rest of TX registers */
+ while (idx >= 0) {
+ writeb(0, mtk_nor->base + MTK_NOR_PRG_REG(idx));
+ idx--;
+ }
+
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PRG_CMD);
+ if (ret)
+ return ret;
+
+ /* restart at first RX byte */
+ idx = rxlen - 1;
+
+ /* read out RX data */
+ for (i = 0; i < rxlen; i++, idx--)
+ rx[i] = readb(mtk_nor->base + MTK_NOR_SHREG(idx));
+
+ return 0;
+}
+
+/* Do a WRSR (Write Status Register) command */
+static int mtk_nor_wr_sr(struct mtk_nor *mtk_nor, u8 sr)
+{
+ writeb(sr, mtk_nor->base + MTK_NOR_PRGDATA5_REG);
+ writeb(8, mtk_nor->base + MTK_NOR_CNT_REG);
+ return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WRSR_CMD);
+}
+
+static int mtk_nor_write_buffer_enable(struct mtk_nor *mtk_nor)
+{
+ u8 reg;
+
+	/* Bit 0 of MTK_NOR_CFG2_REG selects the pre-fetch buffer owner:
+	 * 0: pre-fetch buffer is used for read
+	 * 1: pre-fetch buffer is used for page program
+	 */
+ writel(MTK_NOR_WR_BUF_ENABLE, mtk_nor->base + MTK_NOR_CFG2_REG);
+ return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg,
+ 0x01 == (reg & 0x01), 100, 10000);
+}
+
+static int mtk_nor_write_buffer_disable(struct mtk_nor *mtk_nor)
+{
+ u8 reg;
+
+ writel(MTK_NOR_WR_BUF_DISABLE, mtk_nor->base + MTK_NOR_CFG2_REG);
+ return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg,
+ MTK_NOR_WR_BUF_DISABLE == (reg & 0x1), 100,
+ 10000);
+}
+
+static void mtk_nor_set_addr_width(struct mtk_nor *mtk_nor)
+{
+ u8 val;
+ struct spi_nor *nor = &mtk_nor->nor;
+
+ val = readb(mtk_nor->base + MTK_NOR_DUAL_REG);
+
+ switch (nor->addr_width) {
+ case 3:
+ val &= ~MTK_NOR_4B_ADDR_EN;
+ break;
+ case 4:
+ val |= MTK_NOR_4B_ADDR_EN;
+ break;
+ default:
+ dev_warn(mtk_nor->dev, "Unexpected address width %u.\n",
+ nor->addr_width);
+ break;
+ }
+
+ writeb(val, mtk_nor->base + MTK_NOR_DUAL_REG);
+}
+
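+/* Load the target address into the RADR registers before a PIO transfer. */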
+static void mtk_nor_set_addr(struct mtk_nor *mtk_nor, u32 addr)
+{
+ int i;
+
+ mtk_nor_set_addr_width(mtk_nor);
+
+ for (i = 0; i < 3; i++) {
+ writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR0_REG + i * 4);
+ addr >>= 8;
+ }
+ /* Last register is non-contiguous */
+ writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR3_REG);
+}
+
+static ssize_t mtk_nor_read(struct spi_nor *nor, loff_t from, size_t length,
+ u_char *buffer)
+{
+ int i, ret;
+ int addr = (int)from;
+ u8 *buf = (u8 *)buffer;
+ struct mtk_nor *mtk_nor = nor->priv;
+
+	/* set fast read, dual read or quad read mode */
+ mtk_nor_set_read_mode(mtk_nor);
+ mtk_nor_set_addr(mtk_nor, addr);
+
+ for (i = 0; i < length; i++) {
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_READ_CMD);
+ if (ret < 0)
+ return ret;
+ buf[i] = readb(mtk_nor->base + MTK_NOR_RDATA_REG);
+ }
+ return length;
+}
+
+static int mtk_nor_write_single_byte(struct mtk_nor *mtk_nor,
+ int addr, int length, u8 *data)
+{
+ int i, ret;
+
+ mtk_nor_set_addr(mtk_nor, addr);
+
+ for (i = 0; i < length; i++) {
+ writeb(*data++, mtk_nor->base + MTK_NOR_WDATA_REG);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_WR_CMD);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
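+/* Program one 128-byte chunk through the controller's page-program buffer. */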
+static int mtk_nor_write_buffer(struct mtk_nor *mtk_nor, int addr,
+ const u8 *buf)
+{
+ int i, bufidx, data;
+
+ mtk_nor_set_addr(mtk_nor, addr);
+
+ bufidx = 0;
+ for (i = 0; i < SFLASH_WRBUF_SIZE; i += 4) {
+ data = buf[bufidx + 3]<<24 | buf[bufidx + 2]<<16 |
+ buf[bufidx + 1]<<8 | buf[bufidx];
+ bufidx += 4;
+ writel(data, mtk_nor->base + MTK_NOR_PP_DATA_REG);
+ }
+ return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WR_CMD);
+}
+
+static ssize_t mtk_nor_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *buf)
+{
+ int ret;
+ struct mtk_nor *mtk_nor = nor->priv;
+ size_t i;
+
+ ret = mtk_nor_write_buffer_enable(mtk_nor);
+ if (ret < 0) {
+ dev_warn(mtk_nor->dev, "write buffer enable failed!\n");
+ return ret;
+ }
+
+ for (i = 0; i + SFLASH_WRBUF_SIZE <= len; i += SFLASH_WRBUF_SIZE) {
+ ret = mtk_nor_write_buffer(mtk_nor, to, buf);
+ if (ret < 0) {
+ dev_err(mtk_nor->dev, "write buffer failed!\n");
+ return ret;
+ }
+ to += SFLASH_WRBUF_SIZE;
+ buf += SFLASH_WRBUF_SIZE;
+ }
+ ret = mtk_nor_write_buffer_disable(mtk_nor);
+ if (ret < 0) {
+ dev_warn(mtk_nor->dev, "write buffer disable failed!\n");
+ return ret;
+ }
+
+ if (i < len) {
+ ret = mtk_nor_write_single_byte(mtk_nor, to,
+ (int)(len - i), (u8 *)buf);
+ if (ret < 0) {
+ dev_err(mtk_nor->dev, "write single byte failed!\n");
+ return ret;
+ }
+ }
+
+ return len;
+}
+
+static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ int ret;
+ struct mtk_nor *mtk_nor = nor->priv;
+
+ switch (opcode) {
+ case SPINOR_OP_RDSR:
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_RDSR_CMD);
+ if (ret < 0)
+ return ret;
+ if (len == 1)
+ *buf = readb(mtk_nor->base + MTK_NOR_RDSR_REG);
+ else
+ dev_err(mtk_nor->dev, "len should be 1 for read status!\n");
+ break;
+ default:
+ ret = mtk_nor_do_tx_rx(mtk_nor, opcode, NULL, 0, buf, len);
+ break;
+ }
+ return ret;
+}
+
+static int mtk_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ int len)
+{
+ int ret;
+ struct mtk_nor *mtk_nor = nor->priv;
+
+ switch (opcode) {
+ case SPINOR_OP_WRSR:
+ /* We only handle 1 byte */
+ ret = mtk_nor_wr_sr(mtk_nor, *buf);
+ break;
+ default:
+ ret = mtk_nor_do_tx_rx(mtk_nor, opcode, buf, len, NULL, 0);
+ if (ret)
+ dev_warn(mtk_nor->dev, "write reg failure!\n");
+ break;
+ }
+ return ret;
+}
+
+static void mtk_nor_disable_clk(struct mtk_nor *mtk_nor)
+{
+ clk_disable_unprepare(mtk_nor->spi_clk);
+ clk_disable_unprepare(mtk_nor->nor_clk);
+}
+
+static int mtk_nor_enable_clk(struct mtk_nor *mtk_nor)
+{
+ int ret;
+
+ ret = clk_prepare_enable(mtk_nor->spi_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(mtk_nor->nor_clk);
+ if (ret) {
+ clk_disable_unprepare(mtk_nor->spi_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mtk_nor_init(struct mtk_nor *mtk_nor,
+ struct device_node *flash_node)
+{
+ const struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_READ_1_1_2 |
+ SNOR_HWCAPS_PP,
+ };
+ int ret;
+ struct spi_nor *nor;
+
+ /* initialize controller to accept commands */
+ writel(MTK_NOR_ENABLE_SF_CMD, mtk_nor->base + MTK_NOR_WRPROT_REG);
+
+ nor = &mtk_nor->nor;
+ nor->dev = mtk_nor->dev;
+ nor->priv = mtk_nor;
+ spi_nor_set_flash_node(nor, flash_node);
+
+ /* fill the hooks to spi nor */
+ nor->read = mtk_nor_read;
+ nor->read_reg = mtk_nor_read_reg;
+ nor->write = mtk_nor_write;
+ nor->write_reg = mtk_nor_write_reg;
+ nor->mtd.name = "mtk_nor";
+	/* pass a NULL name: spi_nor_scan() will identify the flash by JEDEC ID */
+ ret = spi_nor_scan(nor, NULL, &hwcaps);
+ if (ret)
+ return ret;
+
+ return mtd_device_register(&nor->mtd, NULL, 0);
+}
+
+static int mtk_nor_drv_probe(struct platform_device *pdev)
+{
+ struct device_node *flash_np;
+ struct resource *res;
+ int ret;
+ struct mtk_nor *mtk_nor;
+
+ if (!pdev->dev.of_node) {
+ dev_err(&pdev->dev, "No DT found\n");
+ return -EINVAL;
+ }
+
+ mtk_nor = devm_kzalloc(&pdev->dev, sizeof(*mtk_nor), GFP_KERNEL);
+ if (!mtk_nor)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, mtk_nor);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mtk_nor->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mtk_nor->base))
+ return PTR_ERR(mtk_nor->base);
+
+ mtk_nor->spi_clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(mtk_nor->spi_clk))
+ return PTR_ERR(mtk_nor->spi_clk);
+
+ mtk_nor->nor_clk = devm_clk_get(&pdev->dev, "sf");
+ if (IS_ERR(mtk_nor->nor_clk))
+ return PTR_ERR(mtk_nor->nor_clk);
+
+ mtk_nor->dev = &pdev->dev;
+
+ ret = mtk_nor_enable_clk(mtk_nor);
+ if (ret)
+ return ret;
+
+ /* only support one attached flash */
+ flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
+ if (!flash_np) {
+ dev_err(&pdev->dev, "no SPI flash device to configure\n");
+ ret = -ENODEV;
+ goto nor_free;
+ }
+ ret = mtk_nor_init(mtk_nor, flash_np);
+
+nor_free:
+ if (ret)
+ mtk_nor_disable_clk(mtk_nor);
+
+ return ret;
+}
+
+static int mtk_nor_drv_remove(struct platform_device *pdev)
+{
+ struct mtk_nor *mtk_nor = platform_get_drvdata(pdev);
+
+ mtk_nor_disable_clk(mtk_nor);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_nor_suspend(struct device *dev)
+{
+ struct mtk_nor *mtk_nor = dev_get_drvdata(dev);
+
+ mtk_nor_disable_clk(mtk_nor);
+
+ return 0;
+}
+
+static int mtk_nor_resume(struct device *dev)
+{
+ struct mtk_nor *mtk_nor = dev_get_drvdata(dev);
+
+ return mtk_nor_enable_clk(mtk_nor);
+}
+
+static const struct dev_pm_ops mtk_nor_dev_pm_ops = {
+ .suspend = mtk_nor_suspend,
+ .resume = mtk_nor_resume,
+};
+
+#define MTK_NOR_DEV_PM_OPS (&mtk_nor_dev_pm_ops)
+#else
+#define MTK_NOR_DEV_PM_OPS NULL
+#endif
+
+static const struct of_device_id mtk_nor_of_ids[] = {
+ { .compatible = "mediatek,mt8173-nor"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_nor_of_ids);
+
+static struct platform_driver mtk_nor_driver = {
+ .probe = mtk_nor_drv_probe,
+ .remove = mtk_nor_drv_remove,
+ .driver = {
+ .name = "mtk-nor",
+ .pm = MTK_NOR_DEV_PM_OPS,
+ .of_match_table = mtk_nor_of_ids,
+ },
+};
+
+module_platform_driver(mtk_nor_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek SPI NOR Flash Driver");
diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/nxp-spifi.c
new file mode 100644
index 000000000..0c9094ec5
--- /dev/null
+++ b/drivers/mtd/spi-nor/nxp-spifi.c
@@ -0,0 +1,483 @@
+/*
+ * SPI-NOR driver for NXP SPI Flash Interface (SPIFI)
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on Freescale QuadSPI driver:
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+/* NXP SPIFI registers, bits and macros */
+#define SPIFI_CTRL 0x000
+#define SPIFI_CTRL_TIMEOUT(timeout) (timeout)
+#define SPIFI_CTRL_CSHIGH(cshigh) ((cshigh) << 16)
+#define SPIFI_CTRL_MODE3 BIT(23)
+#define SPIFI_CTRL_DUAL BIT(28)
+#define SPIFI_CTRL_FBCLK BIT(30)
+#define SPIFI_CMD 0x004
+#define SPIFI_CMD_DATALEN(dlen) ((dlen) & 0x3fff)
+#define SPIFI_CMD_DOUT BIT(15)
+#define SPIFI_CMD_INTLEN(ilen) ((ilen) << 16)
+#define SPIFI_CMD_FIELDFORM(field) ((field) << 19)
+#define SPIFI_CMD_FIELDFORM_ALL_SERIAL SPIFI_CMD_FIELDFORM(0x0)
+#define SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA SPIFI_CMD_FIELDFORM(0x1)
+#define SPIFI_CMD_FRAMEFORM(frame) ((frame) << 21)
+#define SPIFI_CMD_FRAMEFORM_OPCODE_ONLY SPIFI_CMD_FRAMEFORM(0x1)
+#define SPIFI_CMD_OPCODE(op) ((op) << 24)
+#define SPIFI_ADDR 0x008
+#define SPIFI_IDATA 0x00c
+#define SPIFI_CLIMIT 0x010
+#define SPIFI_DATA 0x014
+#define SPIFI_MCMD 0x018
+#define SPIFI_STAT 0x01c
+#define SPIFI_STAT_MCINIT BIT(0)
+#define SPIFI_STAT_CMD BIT(1)
+#define SPIFI_STAT_RESET BIT(4)
+
+#define SPI_NOR_MAX_ID_LEN 6
+
+struct nxp_spifi {
+ struct device *dev;
+ struct clk *clk_spifi;
+ struct clk *clk_reg;
+ void __iomem *io_base;
+ void __iomem *flash_base;
+ struct spi_nor nor;
+ bool memory_mode;
+ u32 mcmd;
+};
+
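+/* Poll the STAT register until the in-flight command has completed. */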
+static int nxp_spifi_wait_for_cmd(struct nxp_spifi *spifi)
+{
+ u8 stat;
+ int ret;
+
+ ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+ !(stat & SPIFI_STAT_CMD), 10, 30);
+ if (ret)
+ dev_warn(spifi->dev, "command timed out\n");
+
+ return ret;
+}
+
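+/* Issue a controller software reset and wait for it to take effect. */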
+static int nxp_spifi_reset(struct nxp_spifi *spifi)
+{
+ u8 stat;
+ int ret;
+
+ writel(SPIFI_STAT_RESET, spifi->io_base + SPIFI_STAT);
+ ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+ !(stat & SPIFI_STAT_RESET), 10, 30);
+ if (ret)
+ dev_warn(spifi->dev, "state reset timed out\n");
+
+ return ret;
+}
+
+static int nxp_spifi_set_memory_mode_off(struct nxp_spifi *spifi)
+{
+ int ret;
+
+ if (!spifi->memory_mode)
+ return 0;
+
+ ret = nxp_spifi_reset(spifi);
+ if (ret)
+ dev_err(spifi->dev, "unable to enter command mode\n");
+ else
+ spifi->memory_mode = false;
+
+ return ret;
+}
+
+static int nxp_spifi_set_memory_mode_on(struct nxp_spifi *spifi)
+{
+ u8 stat;
+ int ret;
+
+ if (spifi->memory_mode)
+ return 0;
+
+ writel(spifi->mcmd, spifi->io_base + SPIFI_MCMD);
+ ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+ stat & SPIFI_STAT_MCINIT, 10, 30);
+ if (ret)
+ dev_err(spifi->dev, "unable to enter memory mode\n");
+ else
+ spifi->memory_mode = true;
+
+ return ret;
+}
+
+static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return ret;
+
+ cmd = SPIFI_CMD_DATALEN(len) |
+ SPIFI_CMD_OPCODE(opcode) |
+ SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_FRAMEFORM_OPCODE_ONLY;
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ while (len--)
+ *buf++ = readb(spifi->io_base + SPIFI_DATA);
+
+ return nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return ret;
+
+ cmd = SPIFI_CMD_DOUT |
+ SPIFI_CMD_DATALEN(len) |
+ SPIFI_CMD_OPCODE(opcode) |
+ SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_FRAMEFORM_OPCODE_ONLY;
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ while (len--)
+ writeb(*buf++, spifi->io_base + SPIFI_DATA);
+
+ return nxp_spifi_wait_for_cmd(spifi);
+}
+
+static ssize_t nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *buf)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_on(spifi);
+ if (ret)
+ return ret;
+
+ memcpy_fromio(buf, spifi->flash_base + from, len);
+
+ return len;
+}
+
+static ssize_t nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *buf)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+ size_t i;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return ret;
+
+ writel(to, spifi->io_base + SPIFI_ADDR);
+
+ cmd = SPIFI_CMD_DOUT |
+ SPIFI_CMD_DATALEN(len) |
+ SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_OPCODE(nor->program_opcode) |
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ for (i = 0; i < len; i++)
+ writeb(buf[i], spifi->io_base + SPIFI_DATA);
+
+ ret = nxp_spifi_wait_for_cmd(spifi);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return ret;
+
+ writel(offs, spifi->io_base + SPIFI_ADDR);
+
+ cmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_OPCODE(nor->erase_opcode) |
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ return nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi)
+{
+ switch (spifi->nor.read_proto) {
+ case SNOR_PROTO_1_1_1:
+ spifi->mcmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL;
+ break;
+ case SNOR_PROTO_1_1_2:
+ case SNOR_PROTO_1_1_4:
+ spifi->mcmd = SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA;
+ break;
+ default:
+ dev_err(spifi->dev, "unsupported SPI read mode\n");
+ return -EINVAL;
+ }
+
+ /* Memory mode supports address length between 1 and 4 */
+ if (spifi->nor.addr_width < 1 || spifi->nor.addr_width > 4)
+ return -EINVAL;
+
+ spifi->mcmd |= SPIFI_CMD_OPCODE(spifi->nor.read_opcode) |
+ SPIFI_CMD_INTLEN(spifi->nor.read_dummy / 8) |
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+
+ return 0;
+}
+
+static void nxp_spifi_dummy_id_read(struct spi_nor *nor)
+{
+	u8 id[SPI_NOR_MAX_ID_LEN];
+
+	nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
+}
+
+static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
+ struct device_node *np)
+{
+ struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_PP,
+ };
+ u32 ctrl, property;
+ u16 mode = 0;
+ int ret;
+
+ if (!of_property_read_u32(np, "spi-rx-bus-width", &property)) {
+ switch (property) {
+ case 1:
+ break;
+ case 2:
+ mode |= SPI_RX_DUAL;
+ break;
+ case 4:
+ mode |= SPI_RX_QUAD;
+ break;
+ default:
+ dev_err(spifi->dev, "unsupported rx-bus-width\n");
+ return -EINVAL;
+ }
+ }
+
+ if (of_find_property(np, "spi-cpha", NULL))
+ mode |= SPI_CPHA;
+
+ if (of_find_property(np, "spi-cpol", NULL))
+ mode |= SPI_CPOL;
+
+ /* Setup control register defaults */
+ ctrl = SPIFI_CTRL_TIMEOUT(1000) |
+ SPIFI_CTRL_CSHIGH(15) |
+ SPIFI_CTRL_FBCLK;
+
+ if (mode & SPI_RX_DUAL) {
+ ctrl |= SPIFI_CTRL_DUAL;
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+ } else if (mode & SPI_RX_QUAD) {
+ ctrl &= ~SPIFI_CTRL_DUAL;
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+ } else {
+ ctrl |= SPIFI_CTRL_DUAL;
+ }
+
+ switch (mode & (SPI_CPHA | SPI_CPOL)) {
+ case SPI_MODE_0:
+ ctrl &= ~SPIFI_CTRL_MODE3;
+ break;
+ case SPI_MODE_3:
+ ctrl |= SPIFI_CTRL_MODE3;
+ break;
+ default:
+ dev_err(spifi->dev, "only mode 0 and 3 supported\n");
+ return -EINVAL;
+ }
+
+ writel(ctrl, spifi->io_base + SPIFI_CTRL);
+
+ spifi->nor.dev = spifi->dev;
+ spi_nor_set_flash_node(&spifi->nor, np);
+ spifi->nor.priv = spifi;
+ spifi->nor.read = nxp_spifi_read;
+ spifi->nor.write = nxp_spifi_write;
+ spifi->nor.erase = nxp_spifi_erase;
+ spifi->nor.read_reg = nxp_spifi_read_reg;
+ spifi->nor.write_reg = nxp_spifi_write_reg;
+
+ /*
+ * The first read on a hard reset isn't reliable so do a
+ * dummy read of the id before calling spi_nor_scan().
+ * The reason for this problem is unknown.
+ *
+ * The official NXP spifilib uses more or less the same
+ * workaround that is applied here by reading the device
+ * id multiple times.
+ */
+ nxp_spifi_dummy_id_read(&spifi->nor);
+
+ ret = spi_nor_scan(&spifi->nor, NULL, &hwcaps);
+ if (ret) {
+ dev_err(spifi->dev, "device scan failed\n");
+ return ret;
+ }
+
+ ret = nxp_spifi_setup_memory_cmd(spifi);
+ if (ret) {
+ dev_err(spifi->dev, "memory command setup failed\n");
+ return ret;
+ }
+
+ ret = mtd_device_register(&spifi->nor.mtd, NULL, 0);
+ if (ret) {
+		dev_err(spifi->dev, "mtd device register failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nxp_spifi_probe(struct platform_device *pdev)
+{
+ struct device_node *flash_np;
+ struct nxp_spifi *spifi;
+ struct resource *res;
+ int ret;
+
+ spifi = devm_kzalloc(&pdev->dev, sizeof(*spifi), GFP_KERNEL);
+ if (!spifi)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "spifi");
+ spifi->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spifi->io_base))
+ return PTR_ERR(spifi->io_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash");
+ spifi->flash_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spifi->flash_base))
+ return PTR_ERR(spifi->flash_base);
+
+ spifi->clk_spifi = devm_clk_get(&pdev->dev, "spifi");
+ if (IS_ERR(spifi->clk_spifi)) {
+ dev_err(&pdev->dev, "spifi clock not found\n");
+ return PTR_ERR(spifi->clk_spifi);
+ }
+
+ spifi->clk_reg = devm_clk_get(&pdev->dev, "reg");
+ if (IS_ERR(spifi->clk_reg)) {
+ dev_err(&pdev->dev, "reg clock not found\n");
+ return PTR_ERR(spifi->clk_reg);
+ }
+
+ ret = clk_prepare_enable(spifi->clk_reg);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable reg clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(spifi->clk_spifi);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable spifi clock\n");
+ goto dis_clk_reg;
+ }
+
+ spifi->dev = &pdev->dev;
+ platform_set_drvdata(pdev, spifi);
+
+ /* Initialize and reset device */
+ nxp_spifi_reset(spifi);
+ writel(0, spifi->io_base + SPIFI_IDATA);
+ writel(0, spifi->io_base + SPIFI_MCMD);
+ nxp_spifi_reset(spifi);
+
+ flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
+ if (!flash_np) {
+ dev_err(&pdev->dev, "no SPI flash device to configure\n");
+ ret = -ENODEV;
+ goto dis_clks;
+ }
+
+ ret = nxp_spifi_setup_flash(spifi, flash_np);
+ of_node_put(flash_np);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to setup flash chip\n");
+ goto dis_clks;
+ }
+
+ return 0;
+
+dis_clks:
+ clk_disable_unprepare(spifi->clk_spifi);
+dis_clk_reg:
+ clk_disable_unprepare(spifi->clk_reg);
+ return ret;
+}
+
+static int nxp_spifi_remove(struct platform_device *pdev)
+{
+ struct nxp_spifi *spifi = platform_get_drvdata(pdev);
+
+ mtd_device_unregister(&spifi->nor.mtd);
+ clk_disable_unprepare(spifi->clk_spifi);
+ clk_disable_unprepare(spifi->clk_reg);
+
+ return 0;
+}
+
+static const struct of_device_id nxp_spifi_match[] = {
+ {.compatible = "nxp,lpc1773-spifi"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nxp_spifi_match);
+
+static struct platform_driver nxp_spifi_driver = {
+ .probe = nxp_spifi_probe,
+ .remove = nxp_spifi_remove,
+ .driver = {
+ .name = "nxp-spifi",
+ .of_match_table = nxp_spifi_match,
+ },
+};
+module_platform_driver(nxp_spifi_driver);
+
+MODULE_DESCRIPTION("NXP SPI Flash Interface driver");
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
new file mode 100644
index 000000000..ff641c060
--- /dev/null
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -0,0 +1,3013 @@
+/*
+ * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
+ * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
+ *
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/math64.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/of_platform.h>
+#include <linux/spi/flash.h>
+#include <linux/mtd/spi-nor.h>
+
+/* Define max times to check status register before we give up. */
+
+/*
+ * For everything but full-chip erase; probably could be much smaller, but kept
+ * around for safety for now
+ */
+#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
+
+/*
+ * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
+ * for larger flash
+ */
+#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
+
+#define SPI_NOR_MAX_ID_LEN 6
+#define SPI_NOR_MAX_ADDR_WIDTH 4
+
+struct flash_info {
+ char *name;
+
+ /*
+ * This array stores the ID bytes.
+	 * The first three bytes are the JEDEC ID.
+ * JEDEC ID zero means "no ID" (mostly older chips).
+ */
+ u8 id[SPI_NOR_MAX_ID_LEN];
+ u8 id_len;
+
+ /* The size listed here is what works with SPINOR_OP_SE, which isn't
+ * necessarily called a "sector" by the vendor.
+ */
+ unsigned sector_size;
+ u16 n_sectors;
+
+ u16 page_size;
+ u16 addr_width;
+
+ u16 flags;
+#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
+#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
+#define SST_WRITE BIT(2) /* use SST byte programming */
+#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
+#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
+#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
+#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
+#define USE_FSR BIT(7) /* use flag status register */
+#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
+#define SPI_NOR_HAS_TB BIT(9) /*
+ * Flash SR has Top/Bottom (TB) protect
+ * bit. Must be used with
+ * SPI_NOR_HAS_LOCK.
+ */
+#define SPI_S3AN BIT(10) /*
+ * Xilinx Spartan 3AN In-System Flash
+ * (MFR cannot be used for probing
+ * because it has the same value as
+ * ATMEL flashes)
+ */
+#define SPI_NOR_4B_OPCODES BIT(11) /*
+ * Use dedicated 4byte address op codes
+ * to support memory size above 128Mib.
+ */
+#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
+#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
+#define USE_CLSR BIT(14) /* use CLSR command */
+
+ int (*quad_enable)(struct spi_nor *nor);
+};
+
+#define JEDEC_MFR(info) ((info)->id[0])
+
+static const struct flash_info *spi_nor_match_id(const char *name);
+
+/*
+ * Read the status register.
+ * Return the status register value, or a negative errno on error.
+ */
+static int read_sr(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
+ if (ret < 0) {
+ pr_err("error %d reading SR\n", (int) ret);
+ return ret;
+ }
+
+ return val;
+}
+
+/*
+ * Read the flag status register.
+ * Return the flag status register value, or a negative errno on error.
+ */
+static int read_fsr(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
+ if (ret < 0) {
+ pr_err("error %d reading FSR\n", ret);
+ return ret;
+ }
+
+ return val;
+}
+
+/*
+ * Read the configuration register.
+ * Return the configuration register value, or a negative errno on error.
+ */
+static int read_cr(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
+ if (ret < 0) {
+ dev_err(nor->dev, "error %d reading CR\n", ret);
+ return ret;
+ }
+
+ return val;
+}
+
+/*
+ * Write status register 1 byte
+ * Returns negative if error occurred.
+ */
+static inline int write_sr(struct spi_nor *nor, u8 val)
+{
+ nor->cmd_buf[0] = val;
+ return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
+}
+
+/*
+ * Set write enable latch with Write Enable command.
+ * Returns negative if error occurred.
+ */
+static inline int write_enable(struct spi_nor *nor)
+{
+ return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
+}
+
+/*
+ * Send write disable instruction to the chip.
+ */
+static inline int write_disable(struct spi_nor *nor)
+{
+ return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
+}
+
+static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
+{
+ return mtd->priv;
+}
+
+
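+/*
+ * Look the opcode up in a two-column conversion table and return the
+ * converted opcode, or the original opcode when no entry matches.
+ */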
+static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == opcode)
+ return table[i][1];
+
+ /* No conversion found, keep input op code. */
+ return opcode;
+}
+
+static inline u8 spi_nor_convert_3to4_read(u8 opcode)
+{
+ static const u8 spi_nor_3to4_read[][2] = {
+ { SPINOR_OP_READ, SPINOR_OP_READ_4B },
+ { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
+ { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
+ { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
+ { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
+ { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
+
+ { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
+ { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
+ { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
+ ARRAY_SIZE(spi_nor_3to4_read));
+}
+
+static inline u8 spi_nor_convert_3to4_program(u8 opcode)
+{
+ static const u8 spi_nor_3to4_program[][2] = {
+ { SPINOR_OP_PP, SPINOR_OP_PP_4B },
+ { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
+ { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
+ ARRAY_SIZE(spi_nor_3to4_program));
+}
+
+static inline u8 spi_nor_convert_3to4_erase(u8 opcode)
+{
+ static const u8 spi_nor_3to4_erase[][2] = {
+ { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
+ { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
+ { SPINOR_OP_SE, SPINOR_OP_SE_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
+ ARRAY_SIZE(spi_nor_3to4_erase));
+}
+
+static void spi_nor_set_4byte_opcodes(struct spi_nor *nor,
+ const struct flash_info *info)
+{
+ /* Do some manufacturer fixups first */
+ switch (JEDEC_MFR(info)) {
+ case SNOR_MFR_SPANSION:
+ /* No small sector erase for 4-byte command set */
+ nor->erase_opcode = SPINOR_OP_SE;
+ nor->mtd.erasesize = info->sector_size;
+ break;
+
+ default:
+ break;
+ }
+
+ nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
+ nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
+ nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
+}
+
+/* Enable/disable 4-byte addressing mode. */
+static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
+ int enable)
+{
+ int status;
+ bool need_wren = false;
+ u8 cmd;
+
+ switch (JEDEC_MFR(info)) {
+ case SNOR_MFR_MICRON:
+ /* Some Micron need WREN command; all will accept it */
+ need_wren = true;
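+		/* fall through */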
+ case SNOR_MFR_MACRONIX:
+ case SNOR_MFR_WINBOND:
+ if (need_wren)
+ write_enable(nor);
+
+ cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
+ status = nor->write_reg(nor, cmd, NULL, 0);
+ if (need_wren)
+ write_disable(nor);
+
+ if (!status && !enable &&
+ JEDEC_MFR(info) == SNOR_MFR_WINBOND) {
+ /*
+ * On Winbond W25Q256FV, leaving 4byte mode causes
+ * the Extended Address Register to be set to 1, so all
+ * 3-byte-address reads come from the second 16M.
+ * We must clear the register to enable normal behavior.
+ */
+ write_enable(nor);
+ nor->cmd_buf[0] = 0;
+ nor->write_reg(nor, SPINOR_OP_WREAR, nor->cmd_buf, 1);
+ write_disable(nor);
+ }
+
+ return status;
+ default:
+ /* Spansion style */
+ nor->cmd_buf[0] = enable << 7;
+ return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
+ }
+}
+
+static int s3an_sr_ready(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_XRDSR, &val, 1);
+ if (ret < 0) {
+ dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
+ return ret;
+ }
+
+ return !!(val & XSR_RDY);
+}
+
+static inline int spi_nor_sr_ready(struct spi_nor *nor)
+{
+ int sr = read_sr(nor);
+ if (sr < 0)
+ return sr;
+
+ if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) {
+ if (sr & SR_E_ERR)
+ dev_err(nor->dev, "Erase Error occurred\n");
+ else
+ dev_err(nor->dev, "Programming Error occurred\n");
+
+ nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0);
+ return -EIO;
+ }
+
+ return !(sr & SR_WIP);
+}
+
+static inline int spi_nor_fsr_ready(struct spi_nor *nor)
+{
+ int fsr = read_fsr(nor);
+ if (fsr < 0)
+ return fsr;
+
+ if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
+ if (fsr & FSR_E_ERR)
+ dev_err(nor->dev, "Erase operation failed.\n");
+ else
+ dev_err(nor->dev, "Program operation failed.\n");
+
+ if (fsr & FSR_PT_ERR)
+ dev_err(nor->dev,
+ "Attempted to modify a protected sector.\n");
+
+ nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
+ return -EIO;
+ }
+
+ return fsr & FSR_READY;
+}
+
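+/*
+ * Combined ready check: consult the XSR or SR as appropriate, plus the FSR
+ * when the flash uses one. The flash is ready only when every applicable
+ * register reports ready.
+ */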
+static int spi_nor_ready(struct spi_nor *nor)
+{
+ int sr, fsr;
+
+ if (nor->flags & SNOR_F_READY_XSR_RDY)
+ sr = s3an_sr_ready(nor);
+ else
+ sr = spi_nor_sr_ready(nor);
+ if (sr < 0)
+ return sr;
+ fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
+ if (fsr < 0)
+ return fsr;
+ return sr && fsr;
+}
+
+/*
+ * Service routine that polls the status register until the flash is ready
+ * or the timeout expires.
+ * Returns non-zero on error.
+ */
+static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
+ unsigned long timeout_jiffies)
+{
+ unsigned long deadline;
+ int timeout = 0, ret;
+
+ deadline = jiffies + timeout_jiffies;
+
+ while (!timeout) {
+ if (time_after_eq(jiffies, deadline))
+ timeout = 1;
+
+ ret = spi_nor_ready(nor);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ return 0;
+
+ cond_resched();
+ }
+
+ dev_err(nor->dev, "flash operation timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+static int spi_nor_wait_till_ready(struct spi_nor *nor)
+{
+ return spi_nor_wait_till_ready_with_timeout(nor,
+ DEFAULT_READY_WAIT_JIFFIES);
+}
+
+/*
+ * Erase the whole flash memory
+ *
+ * Returns 0 if successful, non-zero otherwise.
+ */
+static int erase_chip(struct spi_nor *nor)
+{
+ dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
+
+ return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
+}
+
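+/*
+ * Take the chip mutex and run the controller's prepare hook, if one is set.
+ * spi_nor_unlock_and_unprep() undoes both.
+ */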
+static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ int ret = 0;
+
+ mutex_lock(&nor->lock);
+
+ if (nor->prepare) {
+ ret = nor->prepare(nor, ops);
+ if (ret) {
+ dev_err(nor->dev, "failed in the preparation.\n");
+ mutex_unlock(&nor->lock);
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ if (nor->unprepare)
+ nor->unprepare(nor, ops);
+ mutex_unlock(&nor->lock);
+}
+
+/*
+ * This code converts an address to the Default Address Mode, which has
+ * non-power-of-two page sizes. We must support this mode because it is the
+ * default mode supported by Xilinx tools, it can access the whole flash area,
+ * and changing over to the Power-of-two mode is irreversible and corrupts the
+ * original data.
+ * Addr can safely be unsigned int, the biggest S3AN device is smaller than
+ * 4 MiB.
+ */
+static loff_t spi_nor_s3an_addr_convert(struct spi_nor *nor, unsigned int addr)
+{
+ unsigned int offset;
+ unsigned int page;
+
+ offset = addr % nor->page_size;
+ page = addr / nor->page_size;
+ page <<= (nor->page_size > 512) ? 10 : 9;
+
+ return page | offset;
+}
+
+/*
+ * Initiate the erasure of a single sector
+ */
+static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
+{
+ u8 buf[SPI_NOR_MAX_ADDR_WIDTH];
+ int i;
+
+ if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
+ addr = spi_nor_s3an_addr_convert(nor, addr);
+
+ if (nor->erase)
+ return nor->erase(nor, addr);
+
+ /*
+ * Default implementation, if driver doesn't have a specialized HW
+ * control
+ */
+ for (i = nor->addr_width - 1; i >= 0; i--) {
+ buf[i] = addr & 0xff;
+ addr >>= 8;
+ }
+
+ return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width);
+}
+
+/*
+ * Erase an address range on the nor chip. The address range may span
+ * one or more erase sectors. Return an error if there is a problem erasing.
+ */
+static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ u32 addr, len;
+ uint32_t rem;
+ int ret;
+
+ dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
+ (long long)instr->len);
+
+ div_u64_rem(instr->len, mtd->erasesize, &rem);
+ if (rem)
+ return -EINVAL;
+
+ addr = instr->addr;
+ len = instr->len;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
+ if (ret)
+ return ret;
+
+ /* whole-chip erase? */
+ if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
+ unsigned long timeout;
+
+ write_enable(nor);
+
+ if (erase_chip(nor)) {
+ ret = -EIO;
+ goto erase_err;
+ }
+
+ /*
+ * Scale the timeout linearly with the size of the flash, with
+ * a minimum calibrated to an old 2MB flash. We could try to
+ * pull these from CFI/SFDP, but these values should be good
+ * enough for now.
+ */
+ timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
+ CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
+ (unsigned long)(mtd->size / SZ_2M));
+ ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
+ if (ret)
+ goto erase_err;
+
+ /* REVISIT in some cases we could speed up erasing large regions
+ * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
+ * to use "small sector erase", but that's not always optimal.
+ */
+
+ /* "sector"-at-a-time erase */
+ } else {
+ while (len) {
+ write_enable(nor);
+
+ ret = spi_nor_erase_sector(nor, addr);
+ if (ret)
+ goto erase_err;
+
+ addr += mtd->erasesize;
+ len -= mtd->erasesize;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto erase_err;
+ }
+ }
+
+ write_disable(nor);
+
+erase_err:
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
+
+ return ret;
+}
+
+/* Write status register and ensure bits in mask match written values */
+static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
+{
+ int ret;
+
+ write_enable(nor);
+ ret = write_sr(nor, status_new);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ ret = read_sr(nor);
+ if (ret < 0)
+ return ret;
+
+ return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
+}
+
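+/*
+ * Translate the BP{0,1,2} bits (and the TB bit, when supported) of the
+ * status register into the byte range that is currently protected.
+ */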
+static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
+ uint64_t *len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ int shift = ffs(mask) - 1;
+ int pow;
+
+ if (!(sr & mask)) {
+ /* No protection */
+ *ofs = 0;
+ *len = 0;
+ } else {
+ pow = ((sr & mask) ^ mask) >> shift;
+ *len = mtd->size >> pow;
+ if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
+ *ofs = 0;
+ else
+ *ofs = mtd->size - *len;
+ }
+}
+
+/*
+ * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
+ * @locked is false); 0 otherwise
+ */
+static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+ u8 sr, bool locked)
+{
+ loff_t lock_offs;
+ uint64_t lock_len;
+
+ if (!len)
+ return 1;
+
+ stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
+
+ if (locked)
+ /* Requested range is a sub-range of locked range */
+ return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
+ else
+ /* Requested range does not overlap with locked range */
+ return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
+}
+
+static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+ u8 sr)
+{
+ return stm_check_lock_status_sr(nor, ofs, len, sr, true);
+}
+
+static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+ u8 sr)
+{
+ return stm_check_lock_status_sr(nor, ofs, len, sr, false);
+}
+
+/*
+ * Lock a region of the flash. Compatible with ST Micro and similar flash.
+ * Supports the block protection bits BP{0,1,2} in the status register
+ * (SR). Does not support these features found in newer SR bitfields:
+ * - SEC: sector/block protect - only handle SEC=0 (block protect)
+ * - CMP: complement protect - only support CMP=0 (range is not complemented)
+ *
+ * Support for the following is provided conditionally for some flash:
+ * - TB: top/bottom protect
+ *
+ * Sample table portion for 8MB flash (Winbond w25q64fw):
+ *
+ * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
+ * --------------------------------------------------------------------------
+ * X | X | 0 | 0 | 0 | NONE | NONE
+ * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
+ * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
+ * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
+ * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
+ * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
+ * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
+ * X | X | 1 | 1 | 1 | 8 MB | ALL
+ * ------|-------|-------|-------|-------|---------------|-------------------
+ * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
+ * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
+ * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
+ * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
+ * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
+ * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
+ *
+ * Returns negative on errors, 0 on success.
+ */
+static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ int status_old, status_new;
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ u8 shift = ffs(mask) - 1, pow, val;
+ loff_t lock_len;
+ bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
+ bool use_top;
+
+ status_old = read_sr(nor);
+ if (status_old < 0)
+ return status_old;
+
+ /* If nothing in our range is unlocked, we don't need to do anything */
+ if (stm_is_locked_sr(nor, ofs, len, status_old))
+ return 0;
+
+ /* If anything below us is unlocked, we can't use 'bottom' protection */
+ if (!stm_is_locked_sr(nor, 0, ofs, status_old))
+ can_be_bottom = false;
+
+ /* If anything above us is unlocked, we can't use 'top' protection */
+ if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
+ status_old))
+ can_be_top = false;
+
+ if (!can_be_bottom && !can_be_top)
+ return -EINVAL;
+
+ /* Prefer top, if both are valid */
+ use_top = can_be_top;
+
+ /* lock_len: length of region that should end up locked */
+ if (use_top)
+ lock_len = mtd->size - ofs;
+ else
+ lock_len = ofs + len;
+
+ /*
+ * Need smallest pow such that:
+ *
+ * 1 / (2^pow) <= (len / size)
+ *
+ * so (assuming power-of-2 size) we do:
+ *
+ * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
+ */
+ pow = ilog2(mtd->size) - ilog2(lock_len);
+ val = mask - (pow << shift);
+ if (val & ~mask)
+ return -EINVAL;
+ /* Don't "lock" with no region! */
+ if (!(val & mask))
+ return -EINVAL;
+
+ status_new = (status_old & ~mask & ~SR_TB) | val;
+
+ /* Disallow further writes if WP pin is asserted */
+ status_new |= SR_SRWD;
+
+ if (!use_top)
+ status_new |= SR_TB;
+
+ /* Don't bother if they're the same */
+ if (status_new == status_old)
+ return 0;
+
+ /* Only modify protection if it will not unlock other areas */
+ if ((status_new & mask) < (status_old & mask))
+ return -EINVAL;
+
+ return write_sr_and_check(nor, status_new, mask);
+}
+
+/*
+ * Unlock a region of the flash. See stm_lock() for more info
+ *
+ * Returns negative on errors, 0 on success.
+ */
+static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ int status_old, status_new;
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ u8 shift = ffs(mask) - 1, pow, val;
+ loff_t lock_len;
+ bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
+ bool use_top;
+
+ status_old = read_sr(nor);
+ if (status_old < 0)
+ return status_old;
+
+ /* If nothing in our range is locked, we don't need to do anything */
+ if (stm_is_unlocked_sr(nor, ofs, len, status_old))
+ return 0;
+
+ /* If anything below us is locked, we can't use 'top' protection */
+ if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
+ can_be_top = false;
+
+ /* If anything above us is locked, we can't use 'bottom' protection */
+ if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
+ status_old))
+ can_be_bottom = false;
+
+ if (!can_be_bottom && !can_be_top)
+ return -EINVAL;
+
+ /* Prefer top, if both are valid */
+ use_top = can_be_top;
+
+ /* lock_len: length of region that should remain locked */
+ if (use_top)
+ lock_len = mtd->size - (ofs + len);
+ else
+ lock_len = ofs;
+
+ /*
+ * Need largest pow such that:
+ *
+ * 1 / (2^pow) >= (len / size)
+ *
+ * so (assuming power-of-2 size) we do:
+ *
+ * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
+ */
+ pow = ilog2(mtd->size) - order_base_2(lock_len);
+ if (lock_len == 0) {
+ val = 0; /* fully unlocked */
+ } else {
+ val = mask - (pow << shift);
+ /* Some power-of-two sizes are not supported */
+ if (val & ~mask)
+ return -EINVAL;
+ }
+
+ status_new = (status_old & ~mask & ~SR_TB) | val;
+
+ /* Don't protect status register if we're fully unlocked */
+ if (lock_len == 0)
+ status_new &= ~SR_SRWD;
+
+ if (!use_top)
+ status_new |= SR_TB;
+
+ /* Don't bother if they're the same */
+ if (status_new == status_old)
+ return 0;
+
+ /* Only modify protection if it will not lock other areas */
+ if ((status_new & mask) > (status_old & mask))
+ return -EINVAL;
+
+ return write_sr_and_check(nor, status_new, mask);
+}
+
+/*
+ * Check if a region of the flash is (completely) locked. See stm_lock() for
+ * more info.
+ *
+ * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
+ * negative on errors.
+ */
+static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ int status;
+
+ status = read_sr(nor);
+ if (status < 0)
+ return status;
+
+ return stm_is_locked_sr(nor, ofs, len, status);
+}
+
+static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
+ if (ret)
+ return ret;
+
+ ret = nor->flash_lock(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
+ return ret;
+}
+
+static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
+ if (ret)
+ return ret;
+
+ ret = nor->flash_unlock(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
+ return ret;
+}
+
+static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
+ if (ret)
+ return ret;
+
+ ret = nor->flash_is_locked(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
+ return ret;
+}
+
+static int macronix_quad_enable(struct spi_nor *nor);
+
+/* Used when the "_ext_id" is two bytes at most */
+#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff, \
+ ((_ext_id) >> 8) & 0xff, \
+ (_ext_id) & 0xff, \
+ }, \
+ .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = 256, \
+ .flags = (_flags),
+
+#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff, \
+ ((_ext_id) >> 16) & 0xff, \
+ ((_ext_id) >> 8) & 0xff, \
+ (_ext_id) & 0xff, \
+ }, \
+ .id_len = 6, \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = 256, \
+ .flags = (_flags),
+
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = (_page_size), \
+ .addr_width = (_addr_width), \
+ .flags = (_flags),
+
+#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff \
+ }, \
+ .id_len = 3, \
+ .sector_size = (8*_page_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = _page_size, \
+ .addr_width = 3, \
+ .flags = SPI_NOR_NO_FR | SPI_S3AN,
+
+/* NOTE: double check command sets and memory organization when you add
+ * more nor chips. This current list focuses on newer chips, which
+ * have been converging on command sets that include JEDEC ID.
+ *
+ * All newly added entries should describe *hardware* and should use SECT_4K
+ * (or SECT_4K_PMC) if the hardware supports erasing 4 KiB sectors. For usage
+ * scenarios that exclude small sectors there is a config option that can be
+ * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS.
+ * For historical (and compatibility) reasons (before we got the above config)
+ * some old entries may be missing the 4K flag.
+ */
+static const struct flash_info spi_nor_ids[] = {
+ /* Atmel -- some are (confusingly) marketed as "DataFlash" */
+ { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
+ { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
+
+ { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
+ { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
+ { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
+ { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
+
+ { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
+ { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
+ { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
+ { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
+
+ { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
+
+ /* EON -- en25xxx */
+ { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
+ { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
+ { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
+ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
+ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
+ { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
+ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
+ { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
+
+ /* ESMT */
+ { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
+ { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
+ { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
+
+ /* Everspin */
+ { "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+
+ /* Fujitsu */
+ { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
+
+ /* GigaDevice */
+ {
+ "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ .quad_enable = macronix_quad_enable,
+ },
+
+ /* Intel/Numonyx -- xxxs33b */
+ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
+ { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
+ { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
+
+ /* ISSI */
+ { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+ { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+
+ /* Macronix */
+ { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
+ { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
+ { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
+ { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
+ { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
+ { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
+ { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
+ { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
+ { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
+ { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
+ { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
+ { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
+ { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
+ { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
+ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
+
+ /* Micron */
+ { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
+ { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+ { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+ { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+
+ /* PMC */
+ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
+ { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
+ { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
+
+ /* Spansion/Cypress -- single (large) sector size only, at least
+ * for the chips listed here (without boot sectors).
+ */
+ { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
+ { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
+ { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
+ { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
+ { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
+ { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
+ { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
+ { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
+ { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
+ { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
+ { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
+ { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
+ { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
+ { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+
+ /* SST -- large erase sizes are "overlays", "sectors" are 4K */
+ { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
+ { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
+ { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
+ { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
+ { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
+ { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
+ { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
+ { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
+ { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
+ { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
+ { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
+ { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
+ { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+
+ /* ST Microelectronics -- newer production may have feature updates */
+ { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
+ { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
+ { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
+ { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
+ { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
+ { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
+ { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
+ { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
+ { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
+
+ { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
+ { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
+ { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
+ { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
+ { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
+ { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
+ { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
+ { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
+ { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
+
+ { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
+ { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
+ { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
+
+ { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
+ { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
+ { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
+
+ { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
+ { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
+ { "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
+
+	/* Winbond -- w25x "blocks" are 64 KiB, "sectors" are 4 KiB */
+ { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
+ { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
+ { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
+ { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
+ {
+ "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
+ {
+ "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ {
+ "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
+ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
+
+ /* Catalyst / On Semiconductor -- non-JEDEC */
+ { "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+
+ /* Xilinx S3AN Internal Flash */
+ { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
+ { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
+ { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
+ { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
+ { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
+
+ /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
+ { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { },
+};
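+/*
+ * Worked example of the INFO() encoding above (editorial note, not part of
+ * the original table): the "w25q128" entry declares JEDEC ID 0xef4018 and
+ * 256 sectors of 64 KiB each, i.e. a 16 MiB part, with 4 KiB erase support
+ * advertised via SECT_4K.
+ */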
+
+static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
+{
+ int tmp;
+ u8 id[SPI_NOR_MAX_ID_LEN];
+ const struct flash_info *info;
+
+ tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
+ if (tmp < 0) {
+ dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
+ return ERR_PTR(tmp);
+ }
+
+ for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
+ info = &spi_nor_ids[tmp];
+ if (info->id_len) {
+ if (!memcmp(info->id, id, info->id_len))
+ return &spi_nor_ids[tmp];
+ }
+ }
+ dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
+ id[0], id[1], id[2]);
+ return ERR_PTR(-ENODEV);
+}
+
+static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ ssize_t ret;
+
+ dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
+ if (ret)
+ return ret;
+
+ while (len) {
+ loff_t addr = from;
+
+ if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
+ addr = spi_nor_s3an_addr_convert(nor, addr);
+
+ ret = nor->read(nor, addr, len, buf);
+ if (ret == 0) {
+ /* We shouldn't see 0-length reads */
+ ret = -EIO;
+ goto read_err;
+ }
+ if (ret < 0)
+ goto read_err;
+
+ WARN_ON(ret > len);
+ *retlen += ret;
+ buf += ret;
+ from += ret;
+ len -= ret;
+ }
+ ret = 0;
+
+read_err:
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
+ return ret;
+}
+
+static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ size_t actual;
+ int ret;
+
+ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
+ if (ret)
+ return ret;
+
+ write_enable(nor);
+
+ nor->sst_write_second = false;
+
+ actual = to % 2;
+ /* Start write from odd address. */
+ if (actual) {
+ nor->program_opcode = SPINOR_OP_BP;
+
+ /* write one byte. */
+ ret = nor->write(nor, to, 1, buf);
+ if (ret < 0)
+ goto sst_write_err;
+ WARN(ret != 1, "While writing 1 byte written %i bytes\n",
+ (int)ret);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto sst_write_err;
+ }
+ to += actual;
+
+ /* Write out most of the data here. */
+ for (; actual < len - 1; actual += 2) {
+ nor->program_opcode = SPINOR_OP_AAI_WP;
+
+ /* write two bytes. */
+ ret = nor->write(nor, to, 2, buf + actual);
+ if (ret < 0)
+ goto sst_write_err;
+ WARN(ret != 2, "While writing 2 bytes written %i bytes\n",
+ (int)ret);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto sst_write_err;
+ to += 2;
+ nor->sst_write_second = true;
+ }
+ nor->sst_write_second = false;
+
+ write_disable(nor);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto sst_write_err;
+
+ /* Write out trailing byte if it exists. */
+ if (actual != len) {
+ write_enable(nor);
+
+ nor->program_opcode = SPINOR_OP_BP;
+ ret = nor->write(nor, to, 1, buf + actual);
+ if (ret < 0)
+ goto sst_write_err;
+ WARN(ret != 1, "While writing 1 byte written %i bytes\n",
+ (int)ret);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto sst_write_err;
+ write_disable(nor);
+ actual += 1;
+ }
+sst_write_err:
+ *retlen += actual;
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
+ return ret;
+}
+
+/*
+ * Write an address range to the NOR chip. Data must be written in
+ * FLASH_PAGESIZE chunks. The address range may be any size provided
+ * it is within the physical boundaries.
+ */
+static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ size_t page_offset, page_remain, i;
+ ssize_t ret;
+
+ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < len; ) {
+ ssize_t written;
+ loff_t addr = to + i;
+
+ /*
+ * If page_size is a power of two, the offset can be quickly
+		 * calculated with an AND operation. In the other cases we
+ * need to do a modulus operation (more expensive).
+ * Power of two numbers have only one bit set and we can use
+ * the instruction hweight32 to detect if we need to do a
+ * modulus (do_div()) or not.
+ */
+ if (hweight32(nor->page_size) == 1) {
+ page_offset = addr & (nor->page_size - 1);
+ } else {
+ uint64_t aux = addr;
+
+ page_offset = do_div(aux, nor->page_size);
+ }
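+		/*
+		 * Example (illustrative): with the common 256-byte page size,
+		 * hweight32(256) == 1, so for addr = 0x1234 the cheap path
+		 * yields page_offset = 0x1234 & 0xff = 0x34, avoiding a
+		 * 64-bit division.
+		 */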
+ /* the size of data remaining on the first page */
+ page_remain = min_t(size_t,
+ nor->page_size - page_offset, len - i);
+
+ if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
+ addr = spi_nor_s3an_addr_convert(nor, addr);
+
+ write_enable(nor);
+ ret = nor->write(nor, addr, page_remain, buf + i);
+ if (ret < 0)
+ goto write_err;
+ written = ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto write_err;
+ *retlen += written;
+ i += written;
+ if (written != page_remain) {
+ dev_err(nor->dev,
+ "While writing %zu bytes written %zd bytes\n",
+ page_remain, written);
+ ret = -EIO;
+ goto write_err;
+ }
+ }
+
+write_err:
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
+ return ret;
+}
+
+/**
+ * macronix_quad_enable() - set QE bit in Status Register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Status Register.
+ *
+ * Bit 6 of the Status Register is the QE bit for Macronix-like QSPI memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int macronix_quad_enable(struct spi_nor *nor)
+{
+ int ret, val;
+
+ val = read_sr(nor);
+ if (val < 0)
+ return val;
+ if (val & SR_QUAD_EN_MX)
+ return 0;
+
+ write_enable(nor);
+
+ write_sr(nor, val | SR_QUAD_EN_MX);
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ ret = read_sr(nor);
+ if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
+ dev_err(nor->dev, "Macronix Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Write the Status Register and the Configuration Register with 2 bytes:
+ * the first byte is written to the Status Register, while the second byte
+ * is written to the Configuration Register.
+ * Return negative if an error occurred.
+ */
+static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
+{
+ ssize_t ret;
+
+ write_enable(nor);
+
+ ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
+ if (ret < 0) {
+ dev_err(nor->dev,
+ "error while writing configuration register\n");
+ return -EINVAL;
+ }
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret) {
+ dev_err(nor->dev,
+ "timeout while writing configuration register\n");
+ return ret;
+ }
+
+ return 0;
+}
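+/*
+ * Usage example (editorial note): spansion_quad_enable() below calls
+ * write_sr_cr() with sr_cr = { 0x00, 0x02 }, issuing WRSR (01h) with two
+ * data bytes that clear the Status Register and set bit 1 of the
+ * Configuration Register (the Spansion QE bit, CR_QUAD_EN_SPAN).
+ */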
+
+/**
+ * spansion_quad_enable() - set QE bit in Configuration Register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Configuration Register.
+ * This function is kept for legacy purposes because it has been used for a
+ * long time without anybody complaining, but it should be considered
+ * deprecated and possibly buggy.
+ * First, this function doesn't care about the previous values of the Status
+ * and Configuration Registers when it sets the QE bit (bit 1) in the
+ * Configuration Register: all other bits are cleared, which may have unwanted
+ * side effects like removing some block protections.
+ * Secondly, it uses the Read Configuration Register (35h) instruction even
+ * though a few very old memories don't support it. If a pull-up resistor is
+ * present on the MISO/IO1 line, we might still pass the "read back" test
+ * because the QSPI memory doesn't recognize the command and thus leaves the
+ * MISO/IO1 line state unchanged, hence read_cr() returns 0xFF.
+ *
+ * Bit 1 of the Configuration Register is the QE bit for Spansion-like QSPI
+ * memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spansion_quad_enable(struct spi_nor *nor)
+{
+ u8 sr_cr[2] = {0, CR_QUAD_EN_SPAN};
+ int ret;
+
+ ret = write_sr_cr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ /* read back and check it */
+ ret = read_cr(nor);
+ if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
+ dev_err(nor->dev, "Spansion Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Configuration Register.
+ * This function should be used with QSPI memories not supporting the Read
+ * Configuration Register (35h) instruction.
+ *
+ * Bit 1 of the Configuration Register is the QE bit for Spansion-like QSPI
+ * memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
+{
+ u8 sr_cr[2];
+ int ret;
+
+ /* Keep the current value of the Status Register. */
+ ret = read_sr(nor);
+ if (ret < 0) {
+ dev_err(nor->dev, "error while reading status register\n");
+ return -EINVAL;
+ }
+ sr_cr[0] = ret;
+ sr_cr[1] = CR_QUAD_EN_SPAN;
+
+ return write_sr_cr(nor, sr_cr);
+}
+
+/**
+ * spansion_read_cr_quad_enable() - set QE bit in Configuration Register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Configuration Register.
+ * This function should be used with QSPI memories supporting the Read
+ * Configuration Register (35h) instruction.
+ *
+ * Bit 1 of the Configuration Register is the QE bit for Spansion-like QSPI
+ * memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spansion_read_cr_quad_enable(struct spi_nor *nor)
+{
+ struct device *dev = nor->dev;
+ u8 sr_cr[2];
+ int ret;
+
+ /* Check current Quad Enable bit value. */
+ ret = read_cr(nor);
+ if (ret < 0) {
+ dev_err(dev, "error while reading configuration register\n");
+ return -EINVAL;
+ }
+
+ if (ret & CR_QUAD_EN_SPAN)
+ return 0;
+
+ sr_cr[1] = ret | CR_QUAD_EN_SPAN;
+
+ /* Keep the current value of the Status Register. */
+ ret = read_sr(nor);
+ if (ret < 0) {
+ dev_err(dev, "error while reading status register\n");
+ return -EINVAL;
+ }
+ sr_cr[0] = ret;
+
+ ret = write_sr_cr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ /* Read back and check it. */
+ ret = read_cr(nor);
+ if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
+ dev_err(nor->dev, "Spansion Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * sr2_bit7_quad_enable() - set QE bit in Status Register 2.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Status Register 2.
+ *
+ * This is one of the procedures to set the QE bit described in the SFDP
+ * (JESD216 rev B) specification but no manufacturer using this procedure has
+ * been identified yet, hence the name of the function.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int sr2_bit7_quad_enable(struct spi_nor *nor)
+{
+ u8 sr2;
+ int ret;
+
+ /* Check current Quad Enable bit value. */
+ ret = nor->read_reg(nor, SPINOR_OP_RDSR2, &sr2, 1);
+ if (ret)
+ return ret;
+ if (sr2 & SR2_QUAD_EN_BIT7)
+ return 0;
+
+ /* Update the Quad Enable bit. */
+ sr2 |= SR2_QUAD_EN_BIT7;
+
+ write_enable(nor);
+
+ ret = nor->write_reg(nor, SPINOR_OP_WRSR2, &sr2, 1);
+ if (ret < 0) {
+ dev_err(nor->dev, "error while writing status register 2\n");
+ return -EINVAL;
+ }
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret < 0) {
+ dev_err(nor->dev, "timeout while writing status register 2\n");
+ return ret;
+ }
+
+ /* Read back and check it. */
+ ret = nor->read_reg(nor, SPINOR_OP_RDSR2, &sr2, 1);
+ if (!(ret > 0 && (sr2 & SR2_QUAD_EN_BIT7))) {
+ dev_err(nor->dev, "SR2 Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int spi_nor_check(struct spi_nor *nor)
+{
+ if (!nor->dev || !nor->read || !nor->write ||
+ !nor->read_reg || !nor->write_reg) {
+ pr_err("spi-nor: please fill all the necessary fields!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int s3an_nor_scan(const struct flash_info *info, struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_XRDSR, &val, 1);
+ if (ret < 0) {
+ dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
+ return ret;
+ }
+
+ nor->erase_opcode = SPINOR_OP_XSE;
+ nor->program_opcode = SPINOR_OP_XPP;
+ nor->read_opcode = SPINOR_OP_READ;
+ nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+
+	/*
+	 * These flashes have a page size of 264 or 528 bytes (known as
+	 * Default addressing mode). It can be changed to a more standard
+	 * Power-of-two mode where the page size is 256 or 512 bytes. This
+	 * comes at a price: 3% of the space is lost, the data is corrupted
+	 * by the conversion, and the page size cannot be changed back to
+	 * Default addressing mode.
+	 *
+	 * The current addressing mode can be read from the XRDSR register
+	 * and should not be changed, because changing it is a destructive
+	 * operation.
+	 */
+ if (val & XSR_PAGESIZE) {
+ /* Flash in Power of 2 mode */
+ nor->page_size = (nor->page_size == 264) ? 256 : 512;
+ nor->mtd.writebufsize = nor->page_size;
+ nor->mtd.size = 8 * nor->page_size * info->n_sectors;
+ nor->mtd.erasesize = 8 * nor->page_size;
+ } else {
+ /* Flash in Default addressing mode */
+ nor->flags |= SNOR_F_S3AN_ADDR_DEFAULT;
+ }
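+	/*
+	 * Example (illustrative): for the 3S700AN entry (n_sectors = 512,
+	 * 264-byte pages), Power-of-2 mode yields a 256-byte page, an
+	 * 8 * 256 = 2 KiB erase block and an mtd size of
+	 * 8 * 256 * 512 = 1 MiB.
+	 */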
+
+ return 0;
+}
+
+struct spi_nor_read_command {
+ u8 num_mode_clocks;
+ u8 num_wait_states;
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+struct spi_nor_pp_command {
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+enum spi_nor_read_command_index {
+ SNOR_CMD_READ,
+ SNOR_CMD_READ_FAST,
+ SNOR_CMD_READ_1_1_1_DTR,
+
+ /* Dual SPI */
+ SNOR_CMD_READ_1_1_2,
+ SNOR_CMD_READ_1_2_2,
+ SNOR_CMD_READ_2_2_2,
+ SNOR_CMD_READ_1_2_2_DTR,
+
+ /* Quad SPI */
+ SNOR_CMD_READ_1_1_4,
+ SNOR_CMD_READ_1_4_4,
+ SNOR_CMD_READ_4_4_4,
+ SNOR_CMD_READ_1_4_4_DTR,
+
+	/* Octal SPI */
+ SNOR_CMD_READ_1_1_8,
+ SNOR_CMD_READ_1_8_8,
+ SNOR_CMD_READ_8_8_8,
+ SNOR_CMD_READ_1_8_8_DTR,
+
+ SNOR_CMD_READ_MAX
+};
+
+enum spi_nor_pp_command_index {
+ SNOR_CMD_PP,
+
+ /* Quad SPI */
+ SNOR_CMD_PP_1_1_4,
+ SNOR_CMD_PP_1_4_4,
+ SNOR_CMD_PP_4_4_4,
+
+	/* Octal SPI */
+ SNOR_CMD_PP_1_1_8,
+ SNOR_CMD_PP_1_8_8,
+ SNOR_CMD_PP_8_8_8,
+
+ SNOR_CMD_PP_MAX
+};
+
+struct spi_nor_flash_parameter {
+ u64 size;
+ u32 page_size;
+
+ struct spi_nor_hwcaps hwcaps;
+ struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
+ struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];
+
+ int (*quad_enable)(struct spi_nor *nor);
+};
+
+static void
+spi_nor_set_read_settings(struct spi_nor_read_command *read,
+ u8 num_mode_clocks,
+ u8 num_wait_states,
+ u8 opcode,
+ enum spi_nor_protocol proto)
+{
+ read->num_mode_clocks = num_mode_clocks;
+ read->num_wait_states = num_wait_states;
+ read->opcode = opcode;
+ read->proto = proto;
+}
+
+static void
+spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
+ u8 opcode,
+ enum spi_nor_protocol proto)
+{
+ pp->opcode = opcode;
+ pp->proto = proto;
+}
+
+/*
+ * Serial Flash Discoverable Parameters (SFDP) parsing.
+ */
+
+/**
+ * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the SFDP area to start reading data from
+ * @len: number of bytes to read
+ * @buf: buffer where the SFDP data are copied into (dma-safe memory)
+ *
+ * Whatever the actual numbers of bytes for address and dummy cycles are
+ * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
+ * followed by a 3-byte address and 8 dummy clock cycles.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
+ size_t len, void *buf)
+{
+ u8 addr_width, read_opcode, read_dummy;
+ int ret;
+
+ read_opcode = nor->read_opcode;
+ addr_width = nor->addr_width;
+ read_dummy = nor->read_dummy;
+
+ nor->read_opcode = SPINOR_OP_RDSFDP;
+ nor->addr_width = 3;
+ nor->read_dummy = 8;
+
+ while (len) {
+ ret = nor->read(nor, addr, len, (u8 *)buf);
+ if (!ret || ret > len) {
+ ret = -EIO;
+ goto read_err;
+ }
+ if (ret < 0)
+ goto read_err;
+
+ buf += ret;
+ addr += ret;
+ len -= ret;
+ }
+ ret = 0;
+
+read_err:
+ nor->read_opcode = read_opcode;
+ nor->addr_width = addr_width;
+ nor->read_dummy = read_dummy;
+
+ return ret;
+}
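+/*
+ * Illustrative wire format (editorial note): with the settings above,
+ * reading 8 bytes at SFDP offset 0x10 issues 5Ah 00h 00h 10h, waits
+ * 8 dummy clock cycles, then clocks the data in.
+ */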
+
+/**
+ * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the SFDP area to start reading data from
+ * @len: number of bytes to read
+ * @buf: buffer where the SFDP data are copied into
+ *
+ * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not
+ * guaranteed to be dma-safe.
+ *
+ * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
+ * otherwise.
+ */
+static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
+ size_t len, void *buf)
+{
+ void *dma_safe_buf;
+ int ret;
+
+ dma_safe_buf = kmalloc(len, GFP_KERNEL);
+ if (!dma_safe_buf)
+ return -ENOMEM;
+
+ ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
+ memcpy(buf, dma_safe_buf, len);
+ kfree(dma_safe_buf);
+
+ return ret;
+}
+
+struct sfdp_parameter_header {
+ u8 id_lsb;
+ u8 minor;
+ u8 major;
+ u8 length; /* in double words */
+ u8 parameter_table_pointer[3]; /* byte address */
+ u8 id_msb;
+};
+
+#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
+#define SFDP_PARAM_HEADER_PTP(p) \
+ (((p)->parameter_table_pointer[2] << 16) | \
+ ((p)->parameter_table_pointer[1] << 8) | \
+ ((p)->parameter_table_pointer[0] << 0))
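+
+/*
+ * Example (editorial note): a header with parameter_table_pointer bytes
+ * { 0x30, 0x01, 0x00 } (LSB first) gives SFDP_PARAM_HEADER_PTP == 0x000130,
+ * the byte address of that parameter table in the SFDP area.
+ */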
+
+#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
+#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
+
+#define SFDP_SIGNATURE 0x50444653U
+#define SFDP_JESD216_MAJOR 1
+#define SFDP_JESD216_MINOR 0
+#define SFDP_JESD216A_MINOR 5
+#define SFDP_JESD216B_MINOR 6
+
+struct sfdp_header {
+	u32 signature; /* 0x50444653U <=> "SFDP" */
+ u8 minor;
+ u8 major;
+	u8 nph; /* 0-based number of parameter headers */
+ u8 unused;
+
+ /* Basic Flash Parameter Table. */
+ struct sfdp_parameter_header bfpt_header;
+};
+
+/* Basic Flash Parameter Table */
+
+/*
+ * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
+ * They are indexed from 1 but C arrays are indexed from 0.
+ */
+#define BFPT_DWORD(i) ((i) - 1)
+#define BFPT_DWORD_MAX 16
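+/* For instance, bfpt.dwords[BFPT_DWORD(1)] is the spec's "1st DWORD". */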
+
+/* The first version of JESD216 defined only 9 DWORDs. */
+#define BFPT_DWORD_MAX_JESD216 9
+
+/* 1st DWORD. */
+#define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
+#define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17)
+#define BFPT_DWORD1_DTR BIT(19)
+#define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20)
+#define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21)
+#define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22)
+
+/* 5th DWORD. */
+#define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0)
+#define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4)
+
+/* 11th DWORD. */
+#define BFPT_DWORD11_PAGE_SIZE_SHIFT 4
+#define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4)
+
+/* 15th DWORD. */
+
+/*
+ * (from JESD216 rev B)
+ * Quad Enable Requirements (QER):
+ * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
+ *         reads based on instruction. The DQ3/HOLD# function is held during
+ *         the instruction phase.
+ * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ * Writing only one byte to the status register has the side-effect of
+ * clearing status register 2, including the QE bit. The 100b code is
+ * used if writing one byte to the status register does not modify
+ * status register 2.
+ * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
+ * one data byte where bit 6 is one.
+ * [...]
+ * - 011b: QE is bit 7 of status register 2. It is set via Write status
+ * register 2 instruction 3Eh with one data byte where bit 7 is one.
+ * [...]
+ * The status register 2 is read using instruction 3Fh.
+ * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ * In contrast to the 001b code, writing one byte to the status
+ * register does not modify status register 2.
+ * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
+ *         Read Status instruction 05h. Status register 2 is read using
+ *         instruction 35h. QE is set via Write Status instruction 01h with
+ *         two data bytes where bit 1 of the second byte is one.
+ * [...]
+ */
+#define BFPT_DWORD15_QER_MASK GENMASK(22, 20)
+#define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */
+#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20)
+#define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */
+#define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */
+
+struct sfdp_bfpt {
+ u32 dwords[BFPT_DWORD_MAX];
+};
+
+/* Fast Read settings. */
+
+static inline void
+spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
+ u16 half,
+ enum spi_nor_protocol proto)
+{
+ read->num_mode_clocks = (half >> 5) & 0x07;
+ read->num_wait_states = (half >> 0) & 0x1f;
+ read->opcode = (half >> 8) & 0xff;
+ read->proto = proto;
+}
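+/*
+ * Worked example (editorial note): a read-settings half-word of 0xeb44
+ * decodes to opcode = 0xeb (Fast Read 1-4-4), num_mode_clocks =
+ * (0x44 >> 5) & 0x07 = 2 and num_wait_states = 0x44 & 0x1f = 4.
+ */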
+
+struct sfdp_bfpt_read {
+ /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
+ u32 hwcaps;
+
+ /*
+ * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
+ * whether the Fast Read x-y-z command is supported.
+ */
+ u32 supported_dword;
+ u32 supported_bit;
+
+	/*
+	 * The half-word at offset <settings_shift> in BFPT DWORD
+	 * <settings_dword> encodes the op code, the number of mode clocks and
+	 * the number of wait states to be used by the Fast Read x-y-z command.
+	 */
+ u32 settings_dword;
+ u32 settings_shift;
+
+ /* The SPI protocol for this Fast Read x-y-z command. */
+ enum spi_nor_protocol proto;
+};
+
+static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
+ /* Fast Read 1-1-2 */
+ {
+ SNOR_HWCAPS_READ_1_1_2,
+ BFPT_DWORD(1), BIT(16), /* Supported bit */
+ BFPT_DWORD(4), 0, /* Settings */
+ SNOR_PROTO_1_1_2,
+ },
+
+ /* Fast Read 1-2-2 */
+ {
+ SNOR_HWCAPS_READ_1_2_2,
+ BFPT_DWORD(1), BIT(20), /* Supported bit */
+ BFPT_DWORD(4), 16, /* Settings */
+ SNOR_PROTO_1_2_2,
+ },
+
+ /* Fast Read 2-2-2 */
+ {
+ SNOR_HWCAPS_READ_2_2_2,
+ BFPT_DWORD(5), BIT(0), /* Supported bit */
+ BFPT_DWORD(6), 16, /* Settings */
+ SNOR_PROTO_2_2_2,
+ },
+
+ /* Fast Read 1-1-4 */
+ {
+ SNOR_HWCAPS_READ_1_1_4,
+ BFPT_DWORD(1), BIT(22), /* Supported bit */
+ BFPT_DWORD(3), 16, /* Settings */
+ SNOR_PROTO_1_1_4,
+ },
+
+ /* Fast Read 1-4-4 */
+ {
+ SNOR_HWCAPS_READ_1_4_4,
+ BFPT_DWORD(1), BIT(21), /* Supported bit */
+ BFPT_DWORD(3), 0, /* Settings */
+ SNOR_PROTO_1_4_4,
+ },
+
+ /* Fast Read 4-4-4 */
+ {
+ SNOR_HWCAPS_READ_4_4_4,
+ BFPT_DWORD(5), BIT(4), /* Supported bit */
+ BFPT_DWORD(7), 16, /* Settings */
+ SNOR_PROTO_4_4_4,
+ },
+};
+
+struct sfdp_bfpt_erase {
+ /*
+	 * The half-word at offset <shift> in DWORD <dword> encodes the
+ * op code and erase sector size to be used by Sector Erase commands.
+ */
+ u32 dword;
+ u32 shift;
+};
+
+static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
+ /* Erase Type 1 in DWORD8 bits[15:0] */
+ {BFPT_DWORD(8), 0},
+
+ /* Erase Type 2 in DWORD8 bits[31:16] */
+ {BFPT_DWORD(8), 16},
+
+ /* Erase Type 3 in DWORD9 bits[15:0] */
+ {BFPT_DWORD(9), 0},
+
+ /* Erase Type 4 in DWORD9 bits[31:16] */
+ {BFPT_DWORD(9), 16},
+};
+
+static int spi_nor_hwcaps_read2cmd(u32 hwcaps);
+
+/**
+ * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
+ * @nor: pointer to a 'struct spi_nor'
+ * @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the Basic Flash Parameter Table length and version
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be
+ * filled
+ *
+ * The Basic Flash Parameter Table is the main and only mandatory table as
+ * defined by the SFDP (JESD216) specification.
+ * It provides us with the total size (memory density) of the data array and
+ * the number of address bytes for Fast Read, Page Program and Sector Erase
+ * commands.
+ * For Fast READ commands, it also gives the number of mode clock cycles and
+ * wait states (regrouped in the number of dummy clock cycles) for each
+ * supported instruction op code.
+ * For Page Program, the page size has been available since JESD216 rev A;
+ * however, the supported instruction op codes are still not provided.
+ * For Sector Erase commands, this table stores the supported instruction op
+ * codes and the associated sector sizes.
+ * Finally, the Quad Enable Requirements (QER) are also available since JESD216
+ * rev A. The QER bits encode the manufacturer dependent procedure to be
+ * executed to set the Quad Enable (QE) bit in some internal register of the
+ * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
+ * sending any Quad SPI command to the memory. Actually, setting the QE bit
+ * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
+ * and IO3 hence enabling 4 (Quad) I/O lines.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_bfpt(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ struct spi_nor_flash_parameter *params)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ struct sfdp_bfpt bfpt;
+ size_t len;
+ int i, cmd, err;
+ u32 addr;
+ u16 half;
+
+ /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
+ if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
+ return -EINVAL;
+
+ /* Read the Basic Flash Parameter Table. */
+ len = min_t(size_t, sizeof(bfpt),
+ bfpt_header->length * sizeof(u32));
+ addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
+ memset(&bfpt, 0, sizeof(bfpt));
+ err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
+ if (err < 0)
+ return err;
+
+ /* Fix endianness of the BFPT DWORDs. */
+ for (i = 0; i < BFPT_DWORD_MAX; i++)
+ bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]);
+
+ /* Number of address bytes. */
+ switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
+ case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
+ nor->addr_width = 3;
+ break;
+
+ case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
+ nor->addr_width = 4;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Flash Memory Density (in bits). */
+ params->size = bfpt.dwords[BFPT_DWORD(2)];
+ if (params->size & BIT(31)) {
+ params->size &= ~BIT(31);
+
+ /*
+ * Prevent overflows on params->size. Anyway, a NOR of 2^64
+ * bits is unlikely to exist so this error probably means
+ * the BFPT we are reading is corrupted/wrong.
+ */
+ if (params->size > 63)
+ return -EINVAL;
+
+ params->size = 1ULL << params->size;
+ } else {
+ params->size++;
+ }
+ params->size >>= 3; /* Convert to bytes. */
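+	/*
+	 * Example (illustrative): dwords[2] = 0x00ffffff encodes density-1,
+	 * giving params->size = 0x1000000 bits = 2 MiB; dwords[2] =
+	 * 0x80000022 encodes an exponent, giving params->size =
+	 * 2^34 bits = 2 GiB.
+	 */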
+
+ /* Fast Read settings. */
+ for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
+ const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
+ struct spi_nor_read_command *read;
+
+ if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
+ params->hwcaps.mask &= ~rd->hwcaps;
+ continue;
+ }
+
+ params->hwcaps.mask |= rd->hwcaps;
+ cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
+ read = &params->reads[cmd];
+ half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
+ spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
+ }
+
+ /* Sector Erase settings. */
+ for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
+ const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
+ u32 erasesize;
+ u8 opcode;
+
+ half = bfpt.dwords[er->dword] >> er->shift;
+ erasesize = half & 0xff;
+
+ /* erasesize == 0 means this Erase Type is not supported. */
+ if (!erasesize)
+ continue;
+
+ erasesize = 1U << erasesize;
+ opcode = (half >> 8) & 0xff;
+#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+ if (erasesize == SZ_4K) {
+ nor->erase_opcode = opcode;
+ mtd->erasesize = erasesize;
+ break;
+ }
+#endif
+ if (!mtd->erasesize || mtd->erasesize < erasesize) {
+ nor->erase_opcode = opcode;
+ mtd->erasesize = erasesize;
+ }
+ }
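+	/*
+	 * Example (illustrative): an Erase Type half-word of 0x200c decodes
+	 * to erasesize = 1 << 0x0c = 4 KiB with opcode 0x20, which the loop
+	 * above prefers when CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is enabled.
+	 */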
+
+ /* Stop here if not JESD216 rev A or later. */
+ if (bfpt_header->length < BFPT_DWORD_MAX)
+ return 0;
+
+ /* Page size: this field specifies 'N' so the page size = 2^N bytes. */
+ params->page_size = bfpt.dwords[BFPT_DWORD(11)];
+ params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
+ params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
+ params->page_size = 1U << params->page_size;
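+	/* Example: a raw field value of N = 8 yields a 2^8 = 256-byte page. */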
+
+ /* Quad Enable Requirements. */
+ switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
+ case BFPT_DWORD15_QER_NONE:
+ params->quad_enable = NULL;
+ break;
+
+ case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
+ case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
+ params->quad_enable = spansion_no_read_cr_quad_enable;
+ break;
+
+ case BFPT_DWORD15_QER_SR1_BIT6:
+ params->quad_enable = macronix_quad_enable;
+ break;
+
+ case BFPT_DWORD15_QER_SR2_BIT7:
+ params->quad_enable = sr2_bit7_quad_enable;
+ break;
+
+ case BFPT_DWORD15_QER_SR2_BIT1:
+ params->quad_enable = spansion_read_cr_quad_enable;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be
+ * filled
+ *
+ * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
+ * specification. This is a standard which tends to be supported by almost all
+ * (Q)SPI memory manufacturers. The tables it defines allow us to learn at
+ * runtime the main parameters needed to perform basic SPI flash operations
+ * such as Fast Read, Page Program or Sector Erase commands.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_sfdp(struct spi_nor *nor,
+ struct spi_nor_flash_parameter *params)
+{
+ const struct sfdp_parameter_header *param_header, *bfpt_header;
+ struct sfdp_parameter_header *param_headers = NULL;
+ struct sfdp_header header;
+ struct device *dev = nor->dev;
+ size_t psize;
+ int i, err;
+
+ /* Get the SFDP header. */
+ err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
+ if (err < 0)
+ return err;
+
+ /* Check the SFDP header version. */
+ if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
+ header.major != SFDP_JESD216_MAJOR)
+ return -EINVAL;
+
+ /*
+ * Verify that the first and only mandatory parameter header is a
+ * Basic Flash Parameter Table header as specified in JESD216.
+ */
+ bfpt_header = &header.bfpt_header;
+ if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
+ bfpt_header->major != SFDP_JESD216_MAJOR)
+ return -EINVAL;
+
+ /*
+ * Allocate memory then read all parameter headers with a single
+ * Read SFDP command. These parameter headers will actually be parsed
+ * twice: a first time to get the latest revision of the basic flash
+ * parameter table, then a second time to handle the supported optional
+ * tables.
+	 * Hence we read the parameter headers once and for all to reduce the
+	 * processing time. Also we use kmalloc() instead of devm_kmalloc()
+ * because we don't need to keep these parameter headers: the allocated
+ * memory is always released with kfree() before exiting this function.
+ */
+ if (header.nph) {
+ psize = header.nph * sizeof(*param_headers);
+
+ param_headers = kmalloc(psize, GFP_KERNEL);
+ if (!param_headers)
+ return -ENOMEM;
+
+ err = spi_nor_read_sfdp(nor, sizeof(header),
+ psize, param_headers);
+ if (err < 0) {
+ dev_err(dev, "failed to read SFDP parameter headers\n");
+ goto exit;
+ }
+ }
+
+ /*
+ * Check other parameter headers to get the latest revision of
+ * the basic flash parameter table.
+ */
+ for (i = 0; i < header.nph; i++) {
+ param_header = &param_headers[i];
+
+ if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
+ param_header->major == SFDP_JESD216_MAJOR &&
+ (param_header->minor > bfpt_header->minor ||
+ (param_header->minor == bfpt_header->minor &&
+ param_header->length > bfpt_header->length)))
+ bfpt_header = param_header;
+ }
+
+ err = spi_nor_parse_bfpt(nor, bfpt_header, params);
+ if (err)
+ goto exit;
+
+ /* Parse other parameter headers. */
+ for (i = 0; i < header.nph; i++) {
+ param_header = &param_headers[i];
+
+ switch (SFDP_PARAM_HEADER_ID(param_header)) {
+ case SFDP_SECTOR_MAP_ID:
+ dev_info(dev, "non-uniform erase sector maps are not supported yet.\n");
+ break;
+
+ default:
+ break;
+ }
+
+ if (err)
+ goto exit;
+ }
+
+exit:
+ kfree(param_headers);
+ return err;
+}
+
+static int spi_nor_init_params(struct spi_nor *nor,
+ const struct flash_info *info,
+ struct spi_nor_flash_parameter *params)
+{
+ /* Set legacy flash parameters as default. */
+ memset(params, 0, sizeof(*params));
+
+ /* Set SPI NOR sizes. */
+ params->size = (u64)info->sector_size * info->n_sectors;
+ params->page_size = info->page_size;
+
+ /* (Fast) Read settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_READ;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
+ 0, 0, SPINOR_OP_READ,
+ SNOR_PROTO_1_1_1);
+
+ if (!(info->flags & SPI_NOR_NO_FR)) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
+ 0, 8, SPINOR_OP_READ_FAST,
+ SNOR_PROTO_1_1_1);
+ }
+
+ if (info->flags & SPI_NOR_DUAL_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
+ 0, 8, SPINOR_OP_READ_1_1_2,
+ SNOR_PROTO_1_1_2);
+ }
+
+ if (info->flags & SPI_NOR_QUAD_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
+ 0, 8, SPINOR_OP_READ_1_1_4,
+ SNOR_PROTO_1_1_4);
+ }
+
+ /* Page Program settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_PP;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
+ SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+
+ /* Select the procedure to set the Quad Enable bit. */
+ if (params->hwcaps.mask & (SNOR_HWCAPS_READ_QUAD |
+ SNOR_HWCAPS_PP_QUAD)) {
+ switch (JEDEC_MFR(info)) {
+ case SNOR_MFR_MACRONIX:
+ params->quad_enable = macronix_quad_enable;
+ break;
+
+ case SNOR_MFR_MICRON:
+ break;
+
+ default:
+			/* Kept only for backward compatibility purposes. */
+ params->quad_enable = spansion_quad_enable;
+ break;
+ }
+
+		/*
+		 * Some manufacturers, like GigaDevice, may use a different
+		 * bit to set QE on different memories, so the MFR alone
+		 * can't determine the quad_enable method for this case;
+		 * we need to set it in the flash info list.
+		 */
+ if (info->quad_enable)
+ params->quad_enable = info->quad_enable;
+ }
+
+ /* Override the parameters with data read from SFDP tables. */
+ nor->addr_width = 0;
+ nor->mtd.erasesize = 0;
+ if ((info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
+ !(info->flags & SPI_NOR_SKIP_SFDP)) {
+ struct spi_nor_flash_parameter sfdp_params;
+
+ memcpy(&sfdp_params, params, sizeof(sfdp_params));
+ if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
+ nor->addr_width = 0;
+ nor->mtd.erasesize = 0;
+ } else {
+ memcpy(params, &sfdp_params, sizeof(*params));
+ }
+ }
+
+ return 0;
+}
+
+static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == (int)hwcaps)
+ return table[i][1];
+
+ return -EINVAL;
+}
+
+static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
+{
+ static const int hwcaps_read2cmd[][2] = {
+ { SNOR_HWCAPS_READ, SNOR_CMD_READ },
+ { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
+ { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
+ { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
+ { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
+ { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
+ { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
+ { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
+ { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
+ { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
+ { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
+ { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
+ { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
+ { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
+ { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
+ };
+
+ return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
+ ARRAY_SIZE(hwcaps_read2cmd));
+}
+
+static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
+{
+ static const int hwcaps_pp2cmd[][2] = {
+ { SNOR_HWCAPS_PP, SNOR_CMD_PP },
+ { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
+ { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
+ { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
+ { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
+ { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
+ { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
+ };
+
+ return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
+ ARRAY_SIZE(hwcaps_pp2cmd));
+}
+
+static int spi_nor_select_read(struct spi_nor *nor,
+ const struct spi_nor_flash_parameter *params,
+ u32 shared_hwcaps)
+{
+ int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
+ const struct spi_nor_read_command *read;
+
+ if (best_match < 0)
+ return -EINVAL;
+
+ cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
+ if (cmd < 0)
+ return -EINVAL;
+
+ read = &params->reads[cmd];
+ nor->read_opcode = read->opcode;
+ nor->read_proto = read->proto;
+
+ /*
+	 * In the spi-nor framework, we don't need to distinguish between
+	 * mode clock cycles and wait state clock cycles.
+	 * Indeed, the value of the mode clock cycles is used by a QSPI
+	 * flash memory to know whether it should enter or leave its 0-4-4
+	 * (Continuous Read / XIP) mode.
+	 * eXecution In Place is out of the scope of the mtd sub-system.
+	 * Hence we choose to merge both mode and wait state clock cycles
+	 * into the so-called dummy clock cycles.
+ */
+ nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
+ return 0;
+}
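+/*
+ * Example (editorial note): a Fast Read 1-4-4 command described with
+ * 2 mode clocks and 4 wait states ends up with nor->read_dummy = 6.
+ */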
+
+static int spi_nor_select_pp(struct spi_nor *nor,
+ const struct spi_nor_flash_parameter *params,
+ u32 shared_hwcaps)
+{
+ int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
+ const struct spi_nor_pp_command *pp;
+
+ if (best_match < 0)
+ return -EINVAL;
+
+ cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
+ if (cmd < 0)
+ return -EINVAL;
+
+ pp = &params->page_programs[cmd];
+ nor->program_opcode = pp->opcode;
+ nor->write_proto = pp->proto;
+ return 0;
+}
+
+static int spi_nor_select_erase(struct spi_nor *nor,
+ const struct flash_info *info)
+{
+ struct mtd_info *mtd = &nor->mtd;
+
+ /* Do nothing if already configured from SFDP. */
+ if (mtd->erasesize)
+ return 0;
+
+#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+ /* prefer "small sector" erase if possible */
+ if (info->flags & SECT_4K) {
+ nor->erase_opcode = SPINOR_OP_BE_4K;
+ mtd->erasesize = 4096;
+ } else if (info->flags & SECT_4K_PMC) {
+ nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
+ mtd->erasesize = 4096;
+ } else
+#endif
+ {
+ nor->erase_opcode = SPINOR_OP_SE;
+ mtd->erasesize = info->sector_size;
+ }
+ return 0;
+}
+
+static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
+ const struct spi_nor_flash_parameter *params,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ u32 ignored_mask, shared_mask;
+ bool enable_quad_io;
+ int err;
+
+ /*
+ * Keep only the hardware capabilities supported by both the SPI
+ * controller and the SPI flash memory.
+ */
+ shared_mask = hwcaps->mask & params->hwcaps.mask;
+
+ /* SPI n-n-n protocols are not supported yet. */
+ ignored_mask = (SNOR_HWCAPS_READ_2_2_2 |
+ SNOR_HWCAPS_READ_4_4_4 |
+ SNOR_HWCAPS_READ_8_8_8 |
+ SNOR_HWCAPS_PP_4_4_4 |
+ SNOR_HWCAPS_PP_8_8_8);
+ if (shared_mask & ignored_mask) {
+ dev_dbg(nor->dev,
+ "SPI n-n-n protocols are not supported yet.\n");
+ shared_mask &= ~ignored_mask;
+ }
+
+ /* Select the (Fast) Read command. */
+ err = spi_nor_select_read(nor, params, shared_mask);
+ if (err) {
+ dev_err(nor->dev,
+ "can't select read settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ /* Select the Page Program command. */
+ err = spi_nor_select_pp(nor, params, shared_mask);
+ if (err) {
+ dev_err(nor->dev,
+ "can't select write settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ /* Select the Sector Erase command. */
+ err = spi_nor_select_erase(nor, info);
+ if (err) {
+ dev_err(nor->dev,
+ "can't select erase settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ /* Enable Quad I/O if needed. */
+ enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 ||
+ spi_nor_get_protocol_width(nor->write_proto) == 4);
+ if (enable_quad_io && params->quad_enable)
+ nor->quad_enable = params->quad_enable;
+ else
+ nor->quad_enable = NULL;
+
+ return 0;
+}
+
+static int spi_nor_init(struct spi_nor *nor)
+{
+ int err;
+
+	/*
+	 * Atmel, SST, Intel/Numonyx, and other serial NOR chips tend to power
+	 * up with the software protection bits set.
+	 */
+ if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
+ JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
+ JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
+ nor->info->flags & SPI_NOR_HAS_LOCK) {
+ write_enable(nor);
+ write_sr(nor, 0);
+ spi_nor_wait_till_ready(nor);
+ }
+
+ if (nor->quad_enable) {
+ err = nor->quad_enable(nor);
+ if (err) {
+ dev_err(nor->dev, "quad mode not supported\n");
+ return err;
+ }
+ }
+
+ if ((nor->addr_width == 4) &&
+ (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
+ !(nor->info->flags & SPI_NOR_4B_OPCODES)) {
+ /*
+ * If the RESET# pin isn't hooked up properly, or the system
+ * otherwise doesn't perform a reset command in the boot
+ * sequence, it's impossible to 100% protect against unexpected
+ * reboots (e.g., crashes). Warn the user (or hopefully, system
+ * designer) that this is bad.
+ */
+ WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
+ "enabling reset hack; may not recover from unexpected reboots\n");
+ set_4byte(nor, nor->info, 1);
+ }
+
+ return 0;
+}
+
+/* mtd resume handler */
+static void spi_nor_resume(struct mtd_info *mtd)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ struct device *dev = nor->dev;
+ int ret;
+
+ /* re-initialize the nor chip */
+ ret = spi_nor_init(nor);
+ if (ret)
+ dev_err(dev, "resume() failed\n");
+}
+
+void spi_nor_restore(struct spi_nor *nor)
+{
+ /* restore the addressing mode */
+ if ((nor->addr_width == 4) &&
+ (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
+ !(nor->info->flags & SPI_NOR_4B_OPCODES) &&
+ (nor->flags & SNOR_F_BROKEN_RESET))
+ set_4byte(nor, nor->info, 0);
+}
+EXPORT_SYMBOL_GPL(spi_nor_restore);
+
+int spi_nor_scan(struct spi_nor *nor, const char *name,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ struct spi_nor_flash_parameter params;
+ const struct flash_info *info = NULL;
+ struct device *dev = nor->dev;
+ struct mtd_info *mtd = &nor->mtd;
+ struct device_node *np = spi_nor_get_flash_node(nor);
+ int ret;
+ int i;
+
+ ret = spi_nor_check(nor);
+ if (ret)
+ return ret;
+
+ /* Reset SPI protocol for all commands. */
+ nor->reg_proto = SNOR_PROTO_1_1_1;
+ nor->read_proto = SNOR_PROTO_1_1_1;
+ nor->write_proto = SNOR_PROTO_1_1_1;
+
+ if (name)
+ info = spi_nor_match_id(name);
+	/* Try to auto-detect if the chip name wasn't specified or wasn't found */
+ if (!info)
+ info = spi_nor_read_id(nor);
+ if (IS_ERR_OR_NULL(info))
+ return -ENOENT;
+
+ /*
+ * If caller has specified name of flash model that can normally be
+ * detected using JEDEC, let's verify it.
+ */
+ if (name && info->id_len) {
+ const struct flash_info *jinfo;
+
+ jinfo = spi_nor_read_id(nor);
+ if (IS_ERR(jinfo)) {
+ return PTR_ERR(jinfo);
+ } else if (jinfo != info) {
+ /*
+ * JEDEC knows better, so overwrite platform ID. We
+ * can't trust partitions any longer, but we'll let
+ * mtd apply them anyway, since some partitions may be
+ * marked read-only, and we don't want to lose that
+ * information, even if it's not 100% accurate.
+ */
+ dev_warn(dev, "found %s, expected %s\n",
+ jinfo->name, info->name);
+ info = jinfo;
+ }
+ }
+
+ mutex_init(&nor->lock);
+
+	/*
+	 * Make sure the XSR_RDY flag is set before calling
+	 * spi_nor_wait_till_ready(). Xilinx S3AN flashes share
+	 * their MFR ID with Atmel SPI NOR.
+	 */
+ if (info->flags & SPI_S3AN)
+ nor->flags |= SNOR_F_READY_XSR_RDY;
+
+ /* Parse the Serial Flash Discoverable Parameters table. */
+ ret = spi_nor_init_params(nor, info, &params);
+ if (ret)
+ return ret;
+
+ if (!mtd->name)
+ mtd->name = dev_name(dev);
+ mtd->priv = nor;
+ mtd->type = MTD_NORFLASH;
+ mtd->writesize = 1;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->size = params.size;
+ mtd->_erase = spi_nor_erase;
+ mtd->_read = spi_nor_read;
+ mtd->_resume = spi_nor_resume;
+
+ /* NOR protection support for STmicro/Micron chips and similar */
+ if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
+ info->flags & SPI_NOR_HAS_LOCK) {
+ nor->flash_lock = stm_lock;
+ nor->flash_unlock = stm_unlock;
+ nor->flash_is_locked = stm_is_locked;
+ }
+
+ if (nor->flash_lock && nor->flash_unlock && nor->flash_is_locked) {
+ mtd->_lock = spi_nor_lock;
+ mtd->_unlock = spi_nor_unlock;
+ mtd->_is_locked = spi_nor_is_locked;
+ }
+
+	/* SST NOR chips use AAI word program */
+ if (info->flags & SST_WRITE)
+ mtd->_write = sst_write;
+ else
+ mtd->_write = spi_nor_write;
+
+ if (info->flags & USE_FSR)
+ nor->flags |= SNOR_F_USE_FSR;
+ if (info->flags & SPI_NOR_HAS_TB)
+ nor->flags |= SNOR_F_HAS_SR_TB;
+ if (info->flags & NO_CHIP_ERASE)
+ nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+ if (info->flags & USE_CLSR)
+ nor->flags |= SNOR_F_USE_CLSR;
+
+ if (info->flags & SPI_NOR_NO_ERASE)
+ mtd->flags |= MTD_NO_ERASE;
+
+ mtd->dev.parent = dev;
+ nor->page_size = params.page_size;
+ mtd->writebufsize = nor->page_size;
+
+ if (np) {
+ /* If we were instantiated by DT, use it */
+ if (of_property_read_bool(np, "m25p,fast-read"))
+ params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+ else
+ params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
+ } else {
+ /* If we weren't instantiated by DT, default to fast-read */
+ params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+ }
+
+ if (of_property_read_bool(np, "broken-flash-reset"))
+ nor->flags |= SNOR_F_BROKEN_RESET;
+
+ /* Some devices cannot do fast-read, no matter what DT tells us */
+ if (info->flags & SPI_NOR_NO_FR)
+ params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
+
+ /*
+ * Configure the SPI memory:
+ * - select op codes for (Fast) Read, Page Program and Sector Erase.
+ * - set the number of dummy cycles (mode cycles + wait states).
+ * - set the SPI protocols for register and memory accesses.
+ * - set the Quad Enable bit if needed (required by SPI x-y-4 protos).
+ */
+ ret = spi_nor_setup(nor, info, &params, hwcaps);
+ if (ret)
+ return ret;
+
+ if (nor->addr_width) {
+ /* already configured from SFDP */
+ } else if (info->addr_width) {
+ nor->addr_width = info->addr_width;
+ } else if (mtd->size > 0x1000000) {
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ nor->addr_width = 4;
+ if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
+ info->flags & SPI_NOR_4B_OPCODES)
+ spi_nor_set_4byte_opcodes(nor, info);
+ } else {
+ nor->addr_width = 3;
+ }
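+	/*
+	 * Example (illustrative): 3-byte addresses reach at most 0x1000000
+	 * (16 MiB), so a 32 MiB part (mtd->size == 0x2000000) takes the
+	 * 4-byte branch above.
+	 */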
+
+ if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
+ dev_err(dev, "address width is too large: %u\n",
+ nor->addr_width);
+ return -EINVAL;
+ }
+
+ if (info->flags & SPI_S3AN) {
+ ret = s3an_nor_scan(info, nor);
+ if (ret)
+ return ret;
+ }
+
+	/* Send all the required SPI flash commands to initialize the device */
+ nor->info = info;
+ ret = spi_nor_init(nor);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "%s (%lld Kbytes)\n", info->name,
+ (long long)mtd->size >> 10);
+
+ dev_dbg(dev,
+ "mtd .name = %s, .size = 0x%llx (%lldMiB), "
+ ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
+ mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
+ mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
+
+ if (mtd->numeraseregions)
+ for (i = 0; i < mtd->numeraseregions; i++)
+ dev_dbg(dev,
+ "mtd.eraseregions[%d] = { .offset = 0x%llx, "
+ ".erasesize = 0x%.8x (%uKiB), "
+ ".numblocks = %d }\n",
+ i, (long long)mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].erasesize / 1024,
+ mtd->eraseregions[i].numblocks);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_nor_scan);
+
+static const struct flash_info *spi_nor_match_id(const char *name)
+{
+ const struct flash_info *id = spi_nor_ids;
+
+ while (id->name) {
+ if (!strcmp(name, id->name))
+ return id;
+ id++;
+ }
+ return NULL;
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
+MODULE_AUTHOR("Mike Lavender");
+MODULE_DESCRIPTION("framework for SPI NOR");
diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c
new file mode 100644
index 000000000..13e9fc961
--- /dev/null
+++ b/drivers/mtd/spi-nor/stm32-quadspi.c
@@ -0,0 +1,720 @@
+/*
+ * Driver for stm32 quadspi controller
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Ludovic Barre <ludovic.barre@st.com>.
+ *
+ * License terms: GPL V2.0.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+
+#define QUADSPI_CR 0x00
+#define CR_EN BIT(0)
+#define CR_ABORT BIT(1)
+#define CR_DMAEN BIT(2)
+#define CR_TCEN BIT(3)
+#define CR_SSHIFT BIT(4)
+#define CR_DFM BIT(6)
+#define CR_FSEL BIT(7)
+#define CR_FTHRES_SHIFT 8
+#define CR_FTHRES_MASK GENMASK(12, 8)
+#define CR_FTHRES(n) (((n) << CR_FTHRES_SHIFT) & CR_FTHRES_MASK)
+#define CR_TEIE BIT(16)
+#define CR_TCIE BIT(17)
+#define CR_FTIE BIT(18)
+#define CR_SMIE BIT(19)
+#define CR_TOIE BIT(20)
+#define CR_PRESC_SHIFT 24
+#define CR_PRESC_MASK GENMASK(31, 24)
+#define CR_PRESC(n) (((n) << CR_PRESC_SHIFT) & CR_PRESC_MASK)
+
+#define QUADSPI_DCR 0x04
+#define DCR_CSHT_SHIFT 8
+#define DCR_CSHT_MASK GENMASK(10, 8)
+#define DCR_CSHT(n) (((n) << DCR_CSHT_SHIFT) & DCR_CSHT_MASK)
+#define DCR_FSIZE_SHIFT 16
+#define DCR_FSIZE_MASK GENMASK(20, 16)
+#define DCR_FSIZE(n) (((n) << DCR_FSIZE_SHIFT) & DCR_FSIZE_MASK)
+
+#define QUADSPI_SR 0x08
+#define SR_TEF BIT(0)
+#define SR_TCF BIT(1)
+#define SR_FTF BIT(2)
+#define SR_SMF BIT(3)
+#define SR_TOF BIT(4)
+#define SR_BUSY BIT(5)
+#define SR_FLEVEL_SHIFT 8
+#define SR_FLEVEL_MASK GENMASK(13, 8)
+
+#define QUADSPI_FCR 0x0c
+#define FCR_CTCF BIT(1)
+
+#define QUADSPI_DLR 0x10
+
+#define QUADSPI_CCR 0x14
+#define CCR_INST_SHIFT 0
+#define CCR_INST_MASK GENMASK(7, 0)
+#define CCR_INST(n) (((n) << CCR_INST_SHIFT) & CCR_INST_MASK)
+#define CCR_IMODE_NONE (0U << 8)
+#define CCR_IMODE_1 (1U << 8)
+#define CCR_IMODE_2 (2U << 8)
+#define CCR_IMODE_4 (3U << 8)
+#define CCR_ADMODE_NONE (0U << 10)
+#define CCR_ADMODE_1 (1U << 10)
+#define CCR_ADMODE_2 (2U << 10)
+#define CCR_ADMODE_4 (3U << 10)
+#define CCR_ADSIZE_SHIFT 12
+#define CCR_ADSIZE_MASK GENMASK(13, 12)
+#define CCR_ADSIZE(n) (((n) << CCR_ADSIZE_SHIFT) & CCR_ADSIZE_MASK)
+#define CCR_ABMODE_NONE (0U << 14)
+#define CCR_ABMODE_1 (1U << 14)
+#define CCR_ABMODE_2 (2U << 14)
+#define CCR_ABMODE_4 (3U << 14)
+#define CCR_ABSIZE_8 (0U << 16)
+#define CCR_ABSIZE_16 (1U << 16)
+#define CCR_ABSIZE_24 (2U << 16)
+#define CCR_ABSIZE_32 (3U << 16)
+#define CCR_DCYC_SHIFT 18
+#define CCR_DCYC_MASK GENMASK(22, 18)
+#define CCR_DCYC(n) (((n) << CCR_DCYC_SHIFT) & CCR_DCYC_MASK)
+#define CCR_DMODE_NONE (0U << 24)
+#define CCR_DMODE_1 (1U << 24)
+#define CCR_DMODE_2 (2U << 24)
+#define CCR_DMODE_4 (3U << 24)
+#define CCR_FMODE_INDW (0U << 26)
+#define CCR_FMODE_INDR (1U << 26)
+#define CCR_FMODE_APM (2U << 26)
+#define CCR_FMODE_MM (3U << 26)
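+/*
+ * Illustrative only: a 1-1-4 fast-read (opcode 0x6b) with a 3-byte
+ * address and 8 dummy cycles in indirect read mode would be encoded as
+ * CCR_INST(0x6b) | CCR_IMODE_1 | CCR_ADMODE_1 | CCR_ADSIZE(2) |
+ * CCR_DCYC(8) | CCR_DMODE_4 | CCR_FMODE_INDR.
+ */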
+
+#define QUADSPI_AR 0x18
+#define QUADSPI_ABR 0x1c
+#define QUADSPI_DR 0x20
+#define QUADSPI_PSMKR 0x24
+#define QUADSPI_PSMAR 0x28
+#define QUADSPI_PIR 0x2c
+#define QUADSPI_LPTR 0x30
+#define LPTR_DFT_TIMEOUT 0x10
+
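+/*
+ * DCR.FSIZE holds the number of flash address bits minus one, so a
+ * 2^n byte device encodes as n - 1 (e.g. 23 for a 16 MiB part).
+ */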
+#define FSIZE_VAL(size) (__fls(size) - 1)
+
+#define STM32_MAX_MMAP_SZ SZ_256M
+#define STM32_MAX_NORCHIP 2
+
+#define STM32_QSPI_FIFO_SZ 32
+#define STM32_QSPI_FIFO_TIMEOUT_US 30000
+#define STM32_QSPI_BUSY_TIMEOUT_US 100000
+
+struct stm32_qspi_flash {
+ struct spi_nor nor;
+ struct stm32_qspi *qspi;
+ u32 cs;
+ u32 fsize;
+ u32 presc;
+ u32 read_mode;
+ bool registered;
+ u32 prefetch_limit;
+};
+
+struct stm32_qspi {
+ struct device *dev;
+ void __iomem *io_base;
+ void __iomem *mm_base;
+ resource_size_t mm_size;
+ u32 nor_num;
+ struct clk *clk;
+ u32 clk_rate;
+ struct stm32_qspi_flash flash[STM32_MAX_NORCHIP];
+ struct completion cmd_completion;
+
+ /*
+	 * Protects the controller configuration, which may differ between
+	 * accesses to the two flash banks (bk1, bk2).
+ */
+ struct mutex lock;
+};
+
+struct stm32_qspi_cmd {
+ u8 addr_width;
+ u8 dummy;
+ bool tx_data;
+ u8 opcode;
+ u32 framemode;
+ u32 qspimode;
+ u32 addr;
+ size_t len;
+ void *buf;
+};
+
+static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi)
+{
+ u32 cr;
+ int err = 0;
+
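+	/* The transfer may already have completed before the TCF irq is armed. */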
+ if (readl_relaxed(qspi->io_base + QUADSPI_SR) & SR_TCF)
+ return 0;
+
+ reinit_completion(&qspi->cmd_completion);
+ cr = readl_relaxed(qspi->io_base + QUADSPI_CR);
+ writel_relaxed(cr | CR_TCIE, qspi->io_base + QUADSPI_CR);
+
+ if (!wait_for_completion_interruptible_timeout(&qspi->cmd_completion,
+ msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+
+ writel_relaxed(cr, qspi->io_base + QUADSPI_CR);
+ return err;
+}
+
+static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi)
+{
+ u32 sr;
+
+ return readl_relaxed_poll_timeout(qspi->io_base + QUADSPI_SR, sr,
+ !(sr & SR_BUSY), 10,
+ STM32_QSPI_BUSY_TIMEOUT_US);
+}
+
+static void stm32_qspi_set_framemode(struct spi_nor *nor,
+ struct stm32_qspi_cmd *cmd, bool read)
+{
+ u32 dmode = CCR_DMODE_1;
+
+ cmd->framemode = CCR_IMODE_1;
+
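+	/*
+	 * Only the data phase ever widens here: the instruction (and the
+	 * address, when present) always go out on a single line.
+	 */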
+ if (read) {
+ switch (nor->read_proto) {
+ default:
+ case SNOR_PROTO_1_1_1:
+ dmode = CCR_DMODE_1;
+ break;
+ case SNOR_PROTO_1_1_2:
+ dmode = CCR_DMODE_2;
+ break;
+ case SNOR_PROTO_1_1_4:
+ dmode = CCR_DMODE_4;
+ break;
+ }
+ }
+
+ cmd->framemode |= cmd->tx_data ? dmode : 0;
+ cmd->framemode |= cmd->addr_width ? CCR_ADMODE_1 : 0;
+}
+
+static void stm32_qspi_read_fifo(u8 *val, void __iomem *addr)
+{
+ *val = readb_relaxed(addr);
+}
+
+static void stm32_qspi_write_fifo(u8 *val, void __iomem *addr)
+{
+ writeb_relaxed(*val, addr);
+}
+
+static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
+ const struct stm32_qspi_cmd *cmd)
+{
+ void (*tx_fifo)(u8 *, void __iomem *);
+ u32 len = cmd->len, sr;
+ u8 *buf = cmd->buf;
+ int ret;
+
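+	/*
+	 * Despite the "tx" name, this helper also drains the RX FIFO for
+	 * indirect reads; the direction follows the functional mode in CCR.
+	 */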
+ if (cmd->qspimode == CCR_FMODE_INDW)
+ tx_fifo = stm32_qspi_write_fifo;
+ else
+ tx_fifo = stm32_qspi_read_fifo;
+
+ while (len--) {
+ ret = readl_relaxed_poll_timeout(qspi->io_base + QUADSPI_SR,
+ sr, (sr & SR_FTF), 10,
+ STM32_QSPI_FIFO_TIMEOUT_US);
+ if (ret) {
+ dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr);
+ return ret;
+ }
+ tx_fifo(buf++, qspi->io_base + QUADSPI_DR);
+ }
+
+ return 0;
+}
+
+static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
+ const struct stm32_qspi_cmd *cmd)
+{
+ memcpy_fromio(cmd->buf, qspi->mm_base + cmd->addr, cmd->len);
+ return 0;
+}
+
+static int stm32_qspi_tx(struct stm32_qspi *qspi,
+ const struct stm32_qspi_cmd *cmd)
+{
+ if (!cmd->tx_data)
+ return 0;
+
+ if (cmd->qspimode == CCR_FMODE_MM)
+ return stm32_qspi_tx_mm(qspi, cmd);
+
+ return stm32_qspi_tx_poll(qspi, cmd);
+}
+
+static int stm32_qspi_send(struct stm32_qspi_flash *flash,
+ const struct stm32_qspi_cmd *cmd)
+{
+ struct stm32_qspi *qspi = flash->qspi;
+ u32 ccr, dcr, cr;
+ u32 last_byte;
+ int err;
+
+ err = stm32_qspi_wait_nobusy(qspi);
+ if (err)
+ goto abort;
+
+ dcr = readl_relaxed(qspi->io_base + QUADSPI_DCR) & ~DCR_FSIZE_MASK;
+ dcr |= DCR_FSIZE(flash->fsize);
+ writel_relaxed(dcr, qspi->io_base + QUADSPI_DCR);
+
+ cr = readl_relaxed(qspi->io_base + QUADSPI_CR);
+ cr &= ~CR_PRESC_MASK & ~CR_FSEL;
+ cr |= CR_PRESC(flash->presc);
+ cr |= flash->cs ? CR_FSEL : 0;
+ writel_relaxed(cr, qspi->io_base + QUADSPI_CR);
+
+ if (cmd->tx_data)
+ writel_relaxed(cmd->len - 1, qspi->io_base + QUADSPI_DLR);
+
+ ccr = cmd->framemode | cmd->qspimode;
+
+ if (cmd->dummy)
+ ccr |= CCR_DCYC(cmd->dummy);
+
+ if (cmd->addr_width)
+ ccr |= CCR_ADSIZE(cmd->addr_width - 1);
+
+ ccr |= CCR_INST(cmd->opcode);
+ writel_relaxed(ccr, qspi->io_base + QUADSPI_CCR);
+
+ if (cmd->addr_width && cmd->qspimode != CCR_FMODE_MM)
+ writel_relaxed(cmd->addr, qspi->io_base + QUADSPI_AR);
+
+ err = stm32_qspi_tx(qspi, cmd);
+ if (err)
+ goto abort;
+
+ if (cmd->qspimode != CCR_FMODE_MM) {
+ err = stm32_qspi_wait_cmd(qspi);
+ if (err)
+ goto abort;
+ writel_relaxed(FCR_CTCF, qspi->io_base + QUADSPI_FCR);
+ } else {
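+		/*
+		 * Memory-mapped read: the controller prefetches up to a
+		 * FIFO's worth of data past the last requested byte, so
+		 * abort before it can run beyond the end of the flash.
+		 */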
+ last_byte = cmd->addr + cmd->len;
+ if (last_byte > flash->prefetch_limit)
+ goto abort;
+ }
+
+ return err;
+
+abort:
+ cr = readl_relaxed(qspi->io_base + QUADSPI_CR) | CR_ABORT;
+ writel_relaxed(cr, qspi->io_base + QUADSPI_CR);
+
+ if (err)
+ dev_err(qspi->dev, "%s abort err:%d\n", __func__, err);
+
+ return err;
+}
+
+static int stm32_qspi_read_reg(struct spi_nor *nor,
+ u8 opcode, u8 *buf, int len)
+{
+ struct stm32_qspi_flash *flash = nor->priv;
+ struct device *dev = flash->qspi->dev;
+ struct stm32_qspi_cmd cmd;
+
+ dev_dbg(dev, "read_reg: cmd:%#.2x buf:%pK len:%#x\n", opcode, buf, len);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = opcode;
+ cmd.tx_data = true;
+ cmd.len = len;
+ cmd.buf = buf;
+ cmd.qspimode = CCR_FMODE_INDR;
+
+ stm32_qspi_set_framemode(nor, &cmd, false);
+
+ return stm32_qspi_send(flash, &cmd);
+}
+
+static int stm32_qspi_write_reg(struct spi_nor *nor, u8 opcode,
+ u8 *buf, int len)
+{
+ struct stm32_qspi_flash *flash = nor->priv;
+ struct device *dev = flash->qspi->dev;
+ struct stm32_qspi_cmd cmd;
+
+ dev_dbg(dev, "write_reg: cmd:%#.2x buf:%pK len:%#x\n", opcode, buf, len);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = opcode;
+ cmd.tx_data = !!(buf && len > 0);
+ cmd.len = len;
+ cmd.buf = buf;
+ cmd.qspimode = CCR_FMODE_INDW;
+
+ stm32_qspi_set_framemode(nor, &cmd, false);
+
+ return stm32_qspi_send(flash, &cmd);
+}
+
+static ssize_t stm32_qspi_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *buf)
+{
+ struct stm32_qspi_flash *flash = nor->priv;
+ struct stm32_qspi *qspi = flash->qspi;
+ struct stm32_qspi_cmd cmd;
+ int err;
+
+ dev_dbg(qspi->dev, "read(%#.2x): buf:%pK from:%#.8x len:%#zx\n",
+ nor->read_opcode, buf, (u32)from, len);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = nor->read_opcode;
+ cmd.addr_width = nor->addr_width;
+ cmd.addr = (u32)from;
+ cmd.tx_data = true;
+ cmd.dummy = nor->read_dummy;
+ cmd.len = len;
+ cmd.buf = buf;
+ cmd.qspimode = flash->read_mode;
+
+ stm32_qspi_set_framemode(nor, &cmd, true);
+ err = stm32_qspi_send(flash, &cmd);
+
+ return err ? err : len;
+}
+
+static ssize_t stm32_qspi_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *buf)
+{
+ struct stm32_qspi_flash *flash = nor->priv;
+ struct device *dev = flash->qspi->dev;
+ struct stm32_qspi_cmd cmd;
+ int err;
+
+ dev_dbg(dev, "write(%#.2x): buf:%p to:%#.8x len:%#zx\n",
+ nor->program_opcode, buf, (u32)to, len);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = nor->program_opcode;
+ cmd.addr_width = nor->addr_width;
+ cmd.addr = (u32)to;
+ cmd.tx_data = true;
+ cmd.len = len;
+ cmd.buf = (void *)buf;
+ cmd.qspimode = CCR_FMODE_INDW;
+
+ stm32_qspi_set_framemode(nor, &cmd, false);
+ err = stm32_qspi_send(flash, &cmd);
+
+ return err ? err : len;
+}
+
+static int stm32_qspi_erase(struct spi_nor *nor, loff_t offs)
+{
+ struct stm32_qspi_flash *flash = nor->priv;
+ struct device *dev = flash->qspi->dev;
+ struct stm32_qspi_cmd cmd;
+
+ dev_dbg(dev, "erase(%#.2x):offs:%#x\n", nor->erase_opcode, (u32)offs);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = nor->erase_opcode;
+ cmd.addr_width = nor->addr_width;
+ cmd.addr = (u32)offs;
+ cmd.qspimode = CCR_FMODE_INDW;
+
+ stm32_qspi_set_framemode(nor, &cmd, false);
+
+ return stm32_qspi_send(flash, &cmd);
+}
+
+static irqreturn_t stm32_qspi_irq(int irq, void *dev_id)
+{
+ struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id;
+ u32 cr, sr, fcr = 0;
+
+ cr = readl_relaxed(qspi->io_base + QUADSPI_CR);
+ sr = readl_relaxed(qspi->io_base + QUADSPI_SR);
+
+ if ((cr & CR_TCIE) && (sr & SR_TCF)) {
+ /* tx complete */
+ fcr |= FCR_CTCF;
+ complete(&qspi->cmd_completion);
+ } else {
+ dev_info_ratelimited(qspi->dev, "spurious interrupt\n");
+ }
+
+ writel_relaxed(fcr, qspi->io_base + QUADSPI_FCR);
+
+ return IRQ_HANDLED;
+}
+
+static int stm32_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct stm32_qspi_flash *flash = nor->priv;
+ struct stm32_qspi *qspi = flash->qspi;
+
+ mutex_lock(&qspi->lock);
+ return 0;
+}
+
+static void stm32_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct stm32_qspi_flash *flash = nor->priv;
+ struct stm32_qspi *qspi = flash->qspi;
+
+ mutex_unlock(&qspi->lock);
+}
+
+static int stm32_qspi_flash_setup(struct stm32_qspi *qspi,
+ struct device_node *np)
+{
+ struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_PP,
+ };
+ u32 width, presc, cs_num, max_rate = 0;
+ struct stm32_qspi_flash *flash;
+ struct mtd_info *mtd;
+ int ret;
+
+	if (of_property_read_u32(np, "reg", &cs_num))
+		return -EINVAL;
+	if (cs_num >= STM32_MAX_NORCHIP)
+		return -EINVAL;
+
+ of_property_read_u32(np, "spi-max-frequency", &max_rate);
+ if (!max_rate)
+ return -EINVAL;
+
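+	/*
+	 * The QSPI clock runs at qspi->clk_rate / (presc + 1); round up so
+	 * the resulting rate never exceeds spi-max-frequency.
+	 */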
+ presc = DIV_ROUND_UP(qspi->clk_rate, max_rate) - 1;
+
+ if (of_property_read_u32(np, "spi-rx-bus-width", &width))
+ width = 1;
+
+ if (width == 4)
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+ else if (width == 2)
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+ else if (width != 1)
+ return -EINVAL;
+
+ flash = &qspi->flash[cs_num];
+ flash->qspi = qspi;
+ flash->cs = cs_num;
+ flash->presc = presc;
+
+ flash->nor.dev = qspi->dev;
+ spi_nor_set_flash_node(&flash->nor, np);
+ flash->nor.priv = flash;
+ mtd = &flash->nor.mtd;
+
+ flash->nor.read = stm32_qspi_read;
+ flash->nor.write = stm32_qspi_write;
+ flash->nor.erase = stm32_qspi_erase;
+ flash->nor.read_reg = stm32_qspi_read_reg;
+ flash->nor.write_reg = stm32_qspi_write_reg;
+ flash->nor.prepare = stm32_qspi_prep;
+ flash->nor.unprepare = stm32_qspi_unprep;
+
+ writel_relaxed(LPTR_DFT_TIMEOUT, qspi->io_base + QUADSPI_LPTR);
+
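+	/*
+	 * Enable the controller with the flash prescaler, a 4-byte FIFO
+	 * threshold, half-cycle sample shifting and the timeout counter,
+	 * which releases the chip-select LPTR_DFT_TIMEOUT cycles after the
+	 * last memory-mapped access.
+	 */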
+ writel_relaxed(CR_PRESC(presc) | CR_FTHRES(3) | CR_TCEN | CR_SSHIFT
+ | CR_EN, qspi->io_base + QUADSPI_CR);
+
+	/*
+	 * In the stm32 qspi controller, the QUADSPI_DCR register has an
+	 * fsize field which defines the size of the nor flash. If fsize is
+	 * zero, the controller cannot send spi-nor commands. Set a temporary
+	 * value just to discover the nor flash with "spi_nor_scan";
+	 * afterwards, the right value (mtd->size) can be set.
+	 */
+ flash->fsize = FSIZE_VAL(SZ_1K);
+
+ ret = spi_nor_scan(&flash->nor, NULL, &hwcaps);
+ if (ret) {
+ dev_err(qspi->dev, "device scan failed\n");
+ return ret;
+ }
+
+ flash->fsize = FSIZE_VAL(mtd->size);
+ flash->prefetch_limit = mtd->size - STM32_QSPI_FIFO_SZ;
+
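+	/*
+	 * Use memory-mapped reads only when the whole device fits in the
+	 * mapping window; fall back to indirect reads otherwise.
+	 */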
+ flash->read_mode = CCR_FMODE_MM;
+ if (mtd->size > qspi->mm_size)
+ flash->read_mode = CCR_FMODE_INDR;
+
+ writel_relaxed(DCR_CSHT(1), qspi->io_base + QUADSPI_DCR);
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(qspi->dev, "mtd device parse failed\n");
+ return ret;
+ }
+
+ flash->registered = true;
+
+ dev_dbg(qspi->dev, "read mm:%s cs:%d bus:%d\n",
+ flash->read_mode == CCR_FMODE_MM ? "yes" : "no", cs_num, width);
+
+ return 0;
+}
+
+static void stm32_qspi_mtd_free(struct stm32_qspi *qspi)
+{
+ int i;
+
+ for (i = 0; i < STM32_MAX_NORCHIP; i++)
+ if (qspi->flash[i].registered)
+ mtd_device_unregister(&qspi->flash[i].nor.mtd);
+}
+
+static int stm32_qspi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *flash_np;
+ struct reset_control *rstc;
+ struct stm32_qspi *qspi;
+ struct resource *res;
+ int ret, irq;
+
+ qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL);
+ if (!qspi)
+ return -ENOMEM;
+
+ qspi->nor_num = of_get_child_count(dev->of_node);
+ if (!qspi->nor_num || qspi->nor_num > STM32_MAX_NORCHIP)
+ return -ENODEV;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
+ qspi->io_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qspi->io_base))
+ return PTR_ERR(qspi->io_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
+ qspi->mm_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qspi->mm_base))
+ return PTR_ERR(qspi->mm_base);
+
+ qspi->mm_size = resource_size(res);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
+			       dev_name(dev), qspi);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ return ret;
+ }
+
+ init_completion(&qspi->cmd_completion);
+
+ qspi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(qspi->clk))
+ return PTR_ERR(qspi->clk);
+
+ qspi->clk_rate = clk_get_rate(qspi->clk);
+ if (!qspi->clk_rate)
+ return -EINVAL;
+
+ ret = clk_prepare_enable(qspi->clk);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ return ret;
+ }
+
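+	/* Pulse the optional reset line, when one is described, to start clean. */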
+ rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (!IS_ERR(rstc)) {
+ reset_control_assert(rstc);
+ udelay(2);
+ reset_control_deassert(rstc);
+ }
+
+ qspi->dev = dev;
+ platform_set_drvdata(pdev, qspi);
+ mutex_init(&qspi->lock);
+
+ for_each_available_child_of_node(dev->of_node, flash_np) {
+ ret = stm32_qspi_flash_setup(qspi, flash_np);
+ if (ret) {
+ dev_err(dev, "unable to setup flash chip\n");
+ goto err_flash;
+ }
+ }
+
+ return 0;
+
+err_flash:
+ mutex_destroy(&qspi->lock);
+ stm32_qspi_mtd_free(qspi);
+
+ clk_disable_unprepare(qspi->clk);
+ return ret;
+}
+
+static int stm32_qspi_remove(struct platform_device *pdev)
+{
+ struct stm32_qspi *qspi = platform_get_drvdata(pdev);
+
+ /* disable qspi */
+ writel_relaxed(0, qspi->io_base + QUADSPI_CR);
+
+ stm32_qspi_mtd_free(qspi);
+ mutex_destroy(&qspi->lock);
+
+ clk_disable_unprepare(qspi->clk);
+ return 0;
+}
+
+static const struct of_device_id stm32_qspi_match[] = {
+ {.compatible = "st,stm32f469-qspi"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_qspi_match);
+
+static struct platform_driver stm32_qspi_driver = {
+ .probe = stm32_qspi_probe,
+ .remove = stm32_qspi_remove,
+ .driver = {
+ .name = "stm32-quadspi",
+ .of_match_table = stm32_qspi_match,
+ },
+};
+module_platform_driver(stm32_qspi_driver);
+
+MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 quad spi driver");
+MODULE_LICENSE("GPL v2");