author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-19 21:00:30 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-19 21:00:30 +0000
commit     e54def4ad8144ab15f826416e2e0f290ef1901b4 (patch)
tree       583f8d4bd95cd67c44ff37b878a7eddfca9ab97a  /drivers/mtd
parent     Adding upstream version 6.8.12. (diff)
Adding upstream version 6.9.2 (tag: upstream/6.9.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c            |   4
-rw-r--r--  drivers/mtd/devices/block2mtd.c                |  46
-rw-r--r--  drivers/mtd/devices/mtd_dataflash.c            |   2
-rw-r--r--  drivers/mtd/maps/Kconfig                       |   7
-rw-r--r--  drivers/mtd/maps/Makefile                      |   1
-rw-r--r--  drivers/mtd/maps/intel_vr_nor.c                | 265
-rw-r--r--  drivers/mtd/mtd_blkdevs.c                      |  12
-rw-r--r--  drivers/mtd/mtdcore.c                          |   6
-rw-r--r--  drivers/mtd/nand/raw/atmel/nand-controller.c   |   2
-rw-r--r--  drivers/mtd/nand/raw/brcmnand/Makefile         |   2
-rw-r--r--  drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c  |  99
-rw-r--r--  drivers/mtd/nand/raw/brcmnand/bcmbca_nand.c    | 126
-rw-r--r--  drivers/mtd/nand/raw/brcmnand/brcmnand.c       | 136
-rw-r--r--  drivers/mtd/nand/raw/brcmnand/brcmnand.h       |   2
-rw-r--r--  drivers/mtd/nand/raw/fsl_elbc_nand.c           |   3
-rw-r--r--  drivers/mtd/nand/raw/mtk_nand.c                |   2
-rw-r--r--  drivers/mtd/nand/raw/nand_base.c               |   3
-rw-r--r--  drivers/mtd/nand/raw/nand_bbt.c                |   1
-rw-r--r--  drivers/mtd/nand/raw/nand_hynix.c              |   3
-rw-r--r--  drivers/mtd/nand/raw/stm32_fmc2_nand.c         |  83
-rw-r--r--  drivers/mtd/nand/spi/winbond.c                 |  12
-rw-r--r--  drivers/mtd/spi-nor/core.c                     | 187
-rw-r--r--  drivers/mtd/spi-nor/core.h                     |  30
-rw-r--r--  drivers/mtd/spi-nor/debugfs.c                  |  26
-rw-r--r--  drivers/mtd/spi-nor/sfdp.c                     |  47
-rw-r--r--  drivers/mtd/ssfdc.c                            |   7
-rw-r--r--  drivers/mtd/ubi/Kconfig                        |  13
-rw-r--r--  drivers/mtd/ubi/Makefile                       |   1
-rw-r--r--  drivers/mtd/ubi/block.c                        | 142
-rw-r--r--  drivers/mtd/ubi/build.c                        | 154
-rw-r--r--  drivers/mtd/ubi/eba.c                          |   7
-rw-r--r--  drivers/mtd/ubi/kapi.c                         |  56
-rw-r--r--  drivers/mtd/ubi/nvmem.c                        | 191
-rw-r--r--  drivers/mtd/ubi/ubi.h                          |   3
-rw-r--r--  drivers/mtd/ubi/vmt.c                          |  75
35 files changed, 988 insertions(+), 768 deletions(-)
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index df589d9b4..9f2223d3e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -2411,7 +2411,7 @@ static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
struct cfi_private *cfi = map->fldrv_priv;
- unsigned long timeo = jiffies + HZ;
+ unsigned long timeo;
unsigned long int adr;
DECLARE_WAITQUEUE(wait, current);
int ret;
@@ -2512,7 +2512,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
- unsigned long timeo = jiffies + HZ;
+ unsigned long timeo;
DECLARE_WAITQUEUE(wait, current);
int ret;
int retry_cnt = 0;
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index aa44a23ec..caacdc0a3 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -37,7 +37,7 @@
/* Info for the block device */
struct block2mtd_dev {
struct list_head list;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
struct mtd_info mtd;
struct mutex write_mutex;
};
@@ -55,8 +55,7 @@ static struct page *page_read(struct address_space *mapping, pgoff_t index)
/* erase a specified part of the device */
static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
- struct address_space *mapping =
- dev->bdev_handle->bdev->bd_inode->i_mapping;
+ struct address_space *mapping = dev->bdev_file->f_mapping;
struct page *page;
pgoff_t index = to >> PAGE_SHIFT; // page index
int pages = len >> PAGE_SHIFT;
@@ -106,8 +105,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct block2mtd_dev *dev = mtd->priv;
- struct address_space *mapping =
- dev->bdev_handle->bdev->bd_inode->i_mapping;
+ struct address_space *mapping = dev->bdev_file->f_mapping;
struct page *page;
pgoff_t index = from >> PAGE_SHIFT;
int offset = from & (PAGE_SIZE-1);
@@ -142,8 +140,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
loff_t to, size_t len, size_t *retlen)
{
struct page *page;
- struct address_space *mapping =
- dev->bdev_handle->bdev->bd_inode->i_mapping;
+ struct address_space *mapping = dev->bdev_file->f_mapping;
pgoff_t index = to >> PAGE_SHIFT; // page index
int offset = to & ~PAGE_MASK; // page offset
int cpylen;
@@ -198,7 +195,7 @@ static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
static void block2mtd_sync(struct mtd_info *mtd)
{
struct block2mtd_dev *dev = mtd->priv;
- sync_blockdev(dev->bdev_handle->bdev);
+ sync_blockdev(file_bdev(dev->bdev_file));
return;
}
@@ -210,10 +207,9 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
kfree(dev->mtd.name);
- if (dev->bdev_handle) {
- invalidate_mapping_pages(
- dev->bdev_handle->bdev->bd_inode->i_mapping, 0, -1);
- bdev_release(dev->bdev_handle);
+ if (dev->bdev_file) {
+ invalidate_mapping_pages(dev->bdev_file->f_mapping, 0, -1);
+ bdev_fput(dev->bdev_file);
}
kfree(dev);
@@ -223,10 +219,10 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
* This function is marked __ref because it calls the __init marked
* early_lookup_bdev when called from the early boot code.
*/
-static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname,
+static struct file __ref *mdtblock_early_get_bdev(const char *devname,
blk_mode_t mode, int timeout, struct block2mtd_dev *dev)
{
- struct bdev_handle *bdev_handle = ERR_PTR(-ENODEV);
+ struct file *bdev_file = ERR_PTR(-ENODEV);
#ifndef MODULE
int i;
@@ -234,7 +230,7 @@ static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname,
* We can't use early_lookup_bdev from a running system.
*/
if (system_state >= SYSTEM_RUNNING)
- return bdev_handle;
+ return bdev_file;
/*
* We might not have the root device mounted at this point.
@@ -253,20 +249,20 @@ static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname,
wait_for_device_probe();
if (!early_lookup_bdev(devname, &devt)) {
- bdev_handle = bdev_open_by_dev(devt, mode, dev, NULL);
- if (!IS_ERR(bdev_handle))
+ bdev_file = bdev_file_open_by_dev(devt, mode, dev, NULL);
+ if (!IS_ERR(bdev_file))
break;
}
}
#endif
- return bdev_handle;
+ return bdev_file;
}
static struct block2mtd_dev *add_device(char *devname, int erase_size,
char *label, int timeout)
{
const blk_mode_t mode = BLK_OPEN_READ | BLK_OPEN_WRITE;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
struct block_device *bdev;
struct block2mtd_dev *dev;
char *name;
@@ -279,16 +275,16 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
return NULL;
/* Get a handle on the device */
- bdev_handle = bdev_open_by_path(devname, mode, dev, NULL);
- if (IS_ERR(bdev_handle))
- bdev_handle = mdtblock_early_get_bdev(devname, mode, timeout,
+ bdev_file = bdev_file_open_by_path(devname, mode, dev, NULL);
+ if (IS_ERR(bdev_file))
+ bdev_file = mdtblock_early_get_bdev(devname, mode, timeout,
dev);
- if (IS_ERR(bdev_handle)) {
+ if (IS_ERR(bdev_file)) {
pr_err("error: cannot open device %s\n", devname);
goto err_free_block2mtd;
}
- dev->bdev_handle = bdev_handle;
- bdev = bdev_handle->bdev;
+ dev->bdev_file = bdev_file;
+ bdev = file_bdev(bdev_file);
if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
pr_err("attempting to use an MTD device as a block device\n");
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 0c1b93303..ec52277e3 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -638,7 +638,7 @@ static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
/* name must be usable with cmdlinepart */
sprintf(priv->name, "spi%d.%d-%s",
- spi->master->bus_num, spi_get_chipselect(spi, 0),
+ spi->controller->bus_num, spi_get_chipselect(spi, 0),
name);
device = &priv->mtd;
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index e098ae937..8a8b19874 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -341,13 +341,6 @@ config MTD_UCLINUX
help
Map driver to support image based filesystems for uClinux.
-config MTD_INTEL_VR_NOR
- tristate "NOR flash on Intel Vermilion Range Expansion Bus CS0"
- depends on PCI
- help
- Map driver for a NOR flash bank located on the Expansion Bus of the
- Intel Vermilion Range chipset.
-
config MTD_PLATRAM
tristate "Map driver for platform device RAM (mtd-ram)"
select MTD_RAM
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 094cfb244..a9083c888 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -40,6 +40,5 @@ obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
obj-$(CONFIG_MTD_NETtel) += nettel.o
obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
-obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
obj-$(CONFIG_MTD_VMU) += vmu-flash.o
obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
deleted file mode 100644
index d67b845b0..000000000
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * drivers/mtd/maps/intel_vr_nor.c
- *
- * An MTD map driver for a NOR flash bank on the Expansion Bus of the Intel
- * Vermilion Range chipset.
- *
- * The Vermilion Range Expansion Bus supports four chip selects, each of which
- * has 64MiB of address space. The 2nd BAR of the Expansion Bus PCI Device
- * is a 256MiB memory region containing the address spaces for all four of the
- * chip selects, with start addresses hardcoded on 64MiB boundaries.
- *
- * This map driver only supports NOR flash on chip select 0. The buswidth
- * (either 8 bits or 16 bits) is determined by reading the Expansion Bus Timing
- * and Control Register for Chip Select 0 (EXP_TIMING_CS0). This driver does
- * not modify the value in the EXP_TIMING_CS0 register except to enable writing
- * and disable boot acceleration. The timing parameters in the register are
- * assumed to have been properly initialized by the BIOS. The reset default
- * timing parameters are maximally conservative (slow), so access to the flash
- * will be slower than it should be if the BIOS has not initialized the timing
- * parameters.
- *
- * Author: Andy Lowe <alowe@mvista.com>
- *
- * 2006 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/cfi.h>
-#include <linux/mtd/flashchip.h>
-
-#define DRV_NAME "vr_nor"
-
-struct vr_nor_mtd {
- void __iomem *csr_base;
- struct map_info map;
- struct mtd_info *info;
- struct pci_dev *dev;
-};
-
-/* Expansion Bus Configuration and Status Registers are in BAR 0 */
-#define EXP_CSR_MBAR 0
-/* Expansion Bus Memory Window is BAR 1 */
-#define EXP_WIN_MBAR 1
-/* Maximum address space for Chip Select 0 is 64MiB */
-#define CS0_SIZE 0x04000000
-/* Chip Select 0 is at offset 0 in the Memory Window */
-#define CS0_START 0x0
-/* Chip Select 0 Timing Register is at offset 0 in CSR */
-#define EXP_TIMING_CS0 0x00
-#define TIMING_CS_EN (1 << 31) /* Chip Select Enable */
-#define TIMING_BOOT_ACCEL_DIS (1 << 8) /* Boot Acceleration Disable */
-#define TIMING_WR_EN (1 << 1) /* Write Enable */
-#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */
-#define TIMING_MASK 0x3FFF0000
-
-static void vr_nor_destroy_partitions(struct vr_nor_mtd *p)
-{
- mtd_device_unregister(p->info);
-}
-
-static int vr_nor_init_partitions(struct vr_nor_mtd *p)
-{
- /* register the flash bank */
- /* partition the flash bank */
- return mtd_device_register(p->info, NULL, 0);
-}
-
-static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
-{
- map_destroy(p->info);
-}
-
-static int vr_nor_mtd_setup(struct vr_nor_mtd *p)
-{
- static const char * const probe_types[] =
- { "cfi_probe", "jedec_probe", NULL };
- const char * const *type;
-
- for (type = probe_types; !p->info && *type; type++)
- p->info = do_map_probe(*type, &p->map);
- if (!p->info)
- return -ENODEV;
-
- p->info->dev.parent = &p->dev->dev;
-
- return 0;
-}
-
-static void vr_nor_destroy_maps(struct vr_nor_mtd *p)
-{
- unsigned int exp_timing_cs0;
-
- /* write-protect the flash bank */
- exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
- exp_timing_cs0 &= ~TIMING_WR_EN;
- writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
-
- /* unmap the flash window */
- iounmap(p->map.virt);
-
- /* unmap the csr window */
- iounmap(p->csr_base);
-}
-
-/*
- * Initialize the map_info structure and map the flash.
- * Returns 0 on success, nonzero otherwise.
- */
-static int vr_nor_init_maps(struct vr_nor_mtd *p)
-{
- unsigned long csr_phys, csr_len;
- unsigned long win_phys, win_len;
- unsigned int exp_timing_cs0;
- int err;
-
- csr_phys = pci_resource_start(p->dev, EXP_CSR_MBAR);
- csr_len = pci_resource_len(p->dev, EXP_CSR_MBAR);
- win_phys = pci_resource_start(p->dev, EXP_WIN_MBAR);
- win_len = pci_resource_len(p->dev, EXP_WIN_MBAR);
-
- if (!csr_phys || !csr_len || !win_phys || !win_len)
- return -ENODEV;
-
- if (win_len < (CS0_START + CS0_SIZE))
- return -ENXIO;
-
- p->csr_base = ioremap(csr_phys, csr_len);
- if (!p->csr_base)
- return -ENOMEM;
-
- exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
- if (!(exp_timing_cs0 & TIMING_CS_EN)) {
- dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
- "is disabled.\n");
- err = -ENODEV;
- goto release;
- }
- if ((exp_timing_cs0 & TIMING_MASK) == TIMING_MASK) {
- dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
- "is configured for maximally slow access times.\n");
- }
- p->map.name = DRV_NAME;
- p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2;
- p->map.phys = win_phys + CS0_START;
- p->map.size = CS0_SIZE;
- p->map.virt = ioremap(p->map.phys, p->map.size);
- if (!p->map.virt) {
- err = -ENOMEM;
- goto release;
- }
- simple_map_init(&p->map);
-
- /* Enable writes to flash bank */
- exp_timing_cs0 |= TIMING_BOOT_ACCEL_DIS | TIMING_WR_EN;
- writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
-
- return 0;
-
- release:
- iounmap(p->csr_base);
- return err;
-}
-
-static const struct pci_device_id vr_nor_pci_ids[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x500D)},
- {0,}
-};
-
-static void vr_nor_pci_remove(struct pci_dev *dev)
-{
- struct vr_nor_mtd *p = pci_get_drvdata(dev);
-
- vr_nor_destroy_partitions(p);
- vr_nor_destroy_mtd_setup(p);
- vr_nor_destroy_maps(p);
- kfree(p);
- pci_release_regions(dev);
- pci_disable_device(dev);
-}
-
-static int vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct vr_nor_mtd *p = NULL;
- unsigned int exp_timing_cs0;
- int err;
-
- err = pci_enable_device(dev);
- if (err)
- goto out;
-
- err = pci_request_regions(dev, DRV_NAME);
- if (err)
- goto disable_dev;
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- err = -ENOMEM;
- if (!p)
- goto release;
-
- p->dev = dev;
-
- err = vr_nor_init_maps(p);
- if (err)
- goto release;
-
- err = vr_nor_mtd_setup(p);
- if (err)
- goto destroy_maps;
-
- err = vr_nor_init_partitions(p);
- if (err)
- goto destroy_mtd_setup;
-
- pci_set_drvdata(dev, p);
-
- return 0;
-
- destroy_mtd_setup:
- map_destroy(p->info);
-
- destroy_maps:
- /* write-protect the flash bank */
- exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
- exp_timing_cs0 &= ~TIMING_WR_EN;
- writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
-
- /* unmap the flash window */
- iounmap(p->map.virt);
-
- /* unmap the csr window */
- iounmap(p->csr_base);
-
- release:
- kfree(p);
- pci_release_regions(dev);
-
- disable_dev:
- pci_disable_device(dev);
-
- out:
- return err;
-}
-
-static struct pci_driver vr_nor_pci_driver = {
- .name = DRV_NAME,
- .probe = vr_nor_pci_probe,
- .remove = vr_nor_pci_remove,
- .id_table = vr_nor_pci_ids,
-};
-
-module_pci_driver(vr_nor_pci_driver);
-
-MODULE_AUTHOR("Andy Lowe");
-MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, vr_nor_pci_ids);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index f0526dcc2..3caa0717d 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -277,6 +277,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
struct mtd_blktrans_ops *tr = new->tr;
struct mtd_blktrans_dev *d;
+ struct queue_limits lim = { };
int last_devnum = -1;
struct gendisk *gd;
int ret;
@@ -331,9 +332,13 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
if (ret)
goto out_kfree_tag_set;
+
+ lim.logical_block_size = tr->blksize;
+ if (tr->discard)
+ lim.max_hw_discard_sectors = UINT_MAX;
/* Create gendisk */
- gd = blk_mq_alloc_disk(new->tag_set, new);
+ gd = blk_mq_alloc_disk(new->tag_set, &lim, new);
if (IS_ERR(gd)) {
ret = PTR_ERR(gd);
goto out_free_tag_set;
@@ -371,14 +376,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (tr->flush)
blk_queue_write_cache(new->rq, true, false);
- blk_queue_logical_block_size(new->rq, tr->blksize);
-
blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
- if (tr->discard)
- blk_queue_max_discard_sectors(new->rq, UINT_MAX);
-
gd->queue = new->rq;
if (new->readonly)
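
Note: the mtd_blkdevs change follows the v6.9 convention of declaring queue limits up front and passing them to blk_mq_alloc_disk(), instead of calling blk_queue_* setters on the queue afterwards. A minimal sketch of that pattern with hypothetical parameters (illustrative only, not part of the patch):

/* Sketch of allocating a gendisk with pre-set queue limits (v6.9+ API). */
#include <linux/blk-mq.h>
#include <linux/limits.h>

static struct gendisk *example_alloc_disk(struct blk_mq_tag_set *set,
					  void *queuedata,
					  unsigned int blksize, bool discard)
{
	struct queue_limits lim = { };

	lim.logical_block_size = blksize;
	if (discard)
		lim.max_hw_discard_sectors = UINT_MAX;

	/* The limits are applied once, when the disk and its queue are created. */
	return blk_mq_alloc_disk(set, &lim, queuedata);
}
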
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 6d5c75541..0de87bc63 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -956,10 +956,8 @@ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
size = mtd_otp_size(mtd, true);
- if (size < 0) {
- err = size;
- goto err;
- }
+ if (size < 0)
+ return size;
if (size > 0) {
nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 4cb478bbe..dc75d50d5 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -1378,7 +1378,7 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
return ret;
/*
- * The write cycle timing is directly matching tWC, but is also
+ * The read cycle timing is directly matching tRC, but is also
* dependent on the setup and hold timings we calculated earlier,
* which gives:
*
diff --git a/drivers/mtd/nand/raw/brcmnand/Makefile b/drivers/mtd/nand/raw/brcmnand/Makefile
index 9907e3ec4..0536568c6 100644
--- a/drivers/mtd/nand/raw/brcmnand/Makefile
+++ b/drivers/mtd/nand/raw/brcmnand/Makefile
@@ -2,7 +2,7 @@
# link order matters; don't link the more generic brcmstb_nand.o before the
# more specific iproc_nand.o, for instance
obj-$(CONFIG_MTD_NAND_BRCMNAND_IPROC) += iproc_nand.o
-obj-$(CONFIG_MTD_NAND_BRCMNAND_BCMBCA) += bcm63138_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND_BCMBCA) += bcmbca_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND_BCM63XX) += bcm6368_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND_BRCMSTB) += brcmstb_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand.o
diff --git a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
deleted file mode 100644
index 968c5b674..000000000
--- a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
+++ /dev/null
@@ -1,99 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright © 2015 Broadcom Corporation
- */
-
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "brcmnand.h"
-
-struct bcm63138_nand_soc {
- struct brcmnand_soc soc;
- void __iomem *base;
-};
-
-#define BCM63138_NAND_INT_STATUS 0x00
-#define BCM63138_NAND_INT_EN 0x04
-
-enum {
- BCM63138_CTLRDY = BIT(4),
-};
-
-static bool bcm63138_nand_intc_ack(struct brcmnand_soc *soc)
-{
- struct bcm63138_nand_soc *priv =
- container_of(soc, struct bcm63138_nand_soc, soc);
- void __iomem *mmio = priv->base + BCM63138_NAND_INT_STATUS;
- u32 val = brcmnand_readl(mmio);
-
- if (val & BCM63138_CTLRDY) {
- brcmnand_writel(val & ~BCM63138_CTLRDY, mmio);
- return true;
- }
-
- return false;
-}
-
-static void bcm63138_nand_intc_set(struct brcmnand_soc *soc, bool en)
-{
- struct bcm63138_nand_soc *priv =
- container_of(soc, struct bcm63138_nand_soc, soc);
- void __iomem *mmio = priv->base + BCM63138_NAND_INT_EN;
- u32 val = brcmnand_readl(mmio);
-
- if (en)
- val |= BCM63138_CTLRDY;
- else
- val &= ~BCM63138_CTLRDY;
-
- brcmnand_writel(val, mmio);
-}
-
-static int bcm63138_nand_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct bcm63138_nand_soc *priv;
- struct brcmnand_soc *soc;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- soc = &priv->soc;
-
- priv->base = devm_platform_ioremap_resource_byname(pdev, "nand-int-base");
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
-
- soc->ctlrdy_ack = bcm63138_nand_intc_ack;
- soc->ctlrdy_set_enabled = bcm63138_nand_intc_set;
-
- return brcmnand_probe(pdev, soc);
-}
-
-static const struct of_device_id bcm63138_nand_of_match[] = {
- { .compatible = "brcm,nand-bcm63138" },
- {},
-};
-MODULE_DEVICE_TABLE(of, bcm63138_nand_of_match);
-
-static struct platform_driver bcm63138_nand_driver = {
- .probe = bcm63138_nand_probe,
- .remove_new = brcmnand_remove,
- .driver = {
- .name = "bcm63138_nand",
- .pm = &brcmnand_pm_ops,
- .of_match_table = bcm63138_nand_of_match,
- }
-};
-module_platform_driver(bcm63138_nand_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Brian Norris");
-MODULE_DESCRIPTION("NAND driver for BCM63138");
diff --git a/drivers/mtd/nand/raw/brcmnand/bcmbca_nand.c b/drivers/mtd/nand/raw/brcmnand/bcmbca_nand.c
new file mode 100644
index 000000000..ea534850b
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/bcmbca_nand.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2015 Broadcom Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "brcmnand.h"
+
+struct bcmbca_nand_soc {
+ struct brcmnand_soc soc;
+ void __iomem *base;
+};
+
+#define BCMBCA_NAND_INT_STATUS 0x00
+#define BCMBCA_NAND_INT_EN 0x04
+
+enum {
+ BCMBCA_CTLRDY = BIT(4),
+};
+
+#if defined(CONFIG_ARM64)
+#define ALIGN_REQ 8
+#else
+#define ALIGN_REQ 4
+#endif
+
+static inline bool bcmbca_nand_is_buf_aligned(void *flash_cache, void *buffer)
+{
+ return IS_ALIGNED((uintptr_t)buffer, ALIGN_REQ) &&
+ IS_ALIGNED((uintptr_t)flash_cache, ALIGN_REQ);
+}
+
+static bool bcmbca_nand_intc_ack(struct brcmnand_soc *soc)
+{
+ struct bcmbca_nand_soc *priv =
+ container_of(soc, struct bcmbca_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCMBCA_NAND_INT_STATUS;
+ u32 val = brcmnand_readl(mmio);
+
+ if (val & BCMBCA_CTLRDY) {
+ brcmnand_writel(val & ~BCMBCA_CTLRDY, mmio);
+ return true;
+ }
+
+ return false;
+}
+
+static void bcmbca_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+ struct bcmbca_nand_soc *priv =
+ container_of(soc, struct bcmbca_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCMBCA_NAND_INT_EN;
+ u32 val = brcmnand_readl(mmio);
+
+ if (en)
+ val |= BCMBCA_CTLRDY;
+ else
+ val &= ~BCMBCA_CTLRDY;
+
+ brcmnand_writel(val, mmio);
+}
+
+static void bcmbca_read_data_bus(struct brcmnand_soc *soc,
+ void __iomem *flash_cache, u32 *buffer, int fc_words)
+{
+ /*
+ * memcpy may do unaligned access depending on the source and
+ * destination addresses, which is incompatible with the NAND flash
+ * cache. Fall back to memcpy_fromio() in that case.
+ */
+ if (bcmbca_nand_is_buf_aligned((void __force *)flash_cache, buffer))
+ memcpy((void *)buffer, (void __force *)flash_cache, fc_words * 4);
+ else
+ memcpy_fromio((void *)buffer, flash_cache, fc_words * 4);
+}
+
+static int bcmbca_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcmbca_nand_soc *priv;
+ struct brcmnand_soc *soc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ soc = &priv->soc;
+
+ priv->base = devm_platform_ioremap_resource_byname(pdev, "nand-int-base");
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ soc->ctlrdy_ack = bcmbca_nand_intc_ack;
+ soc->ctlrdy_set_enabled = bcmbca_nand_intc_set;
+ soc->read_data_bus = bcmbca_read_data_bus;
+
+ return brcmnand_probe(pdev, soc);
+}
+
+static const struct of_device_id bcmbca_nand_of_match[] = {
+ { .compatible = "brcm,nand-bcm63138" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcmbca_nand_of_match);
+
+static struct platform_driver bcmbca_nand_driver = {
+ .probe = bcmbca_nand_probe,
+ .remove_new = brcmnand_remove,
+ .driver = {
+ .name = "bcmbca_nand",
+ .pm = &brcmnand_pm_ops,
+ .of_match_table = bcmbca_nand_of_match,
+ }
+};
+module_platform_driver(bcmbca_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Brian Norris");
+MODULE_DESCRIPTION("NAND driver for BCMBCA");
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index b8e70fc64..1b2ec0fec 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -851,6 +851,20 @@ static inline u32 edu_readl(struct brcmnand_controller *ctrl,
return brcmnand_readl(ctrl->edu_base + offs);
}
+static inline void brcmnand_read_data_bus(struct brcmnand_controller *ctrl,
+ void __iomem *flash_cache, u32 *buffer, int fc_words)
+{
+ struct brcmnand_soc *soc = ctrl->soc;
+ int i;
+
+ if (soc && soc->read_data_bus) {
+ soc->read_data_bus(soc, flash_cache, buffer, fc_words);
+ } else {
+ for (i = 0; i < fc_words; i++)
+ buffer[i] = brcmnand_read_fc(ctrl, i);
+ }
+}
+
static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
{
@@ -1024,6 +1038,22 @@ static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
return -1;
}
+static bool brcmnand_get_sector_size_1k(struct brcmnand_host *host)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int sector_size_bit = brcmnand_sector_1k_shift(ctrl);
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ u32 acc_control;
+
+ if (sector_size_bit < 0)
+ return false;
+
+ acc_control = nand_readreg(ctrl, acc_control_offs);
+
+ return ((acc_control & BIT(sector_size_bit)) != 0);
+}
+
static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
{
struct brcmnand_controller *ctrl = host->ctrl;
@@ -1041,6 +1071,43 @@ static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
nand_writereg(ctrl, acc_control_offs, tmp);
}
+static int brcmnand_get_spare_size(struct brcmnand_host *host)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ u32 acc = nand_readreg(ctrl, acc_control_offs);
+
+ return (acc & brcmnand_spare_area_mask(ctrl));
+}
+
+static void brcmnand_get_ecc_settings(struct brcmnand_host *host, struct nand_chip *chip)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ bool sector_size_1k = brcmnand_get_sector_size_1k(host);
+ int spare_area_size, ecc_level;
+ u32 acc;
+
+ spare_area_size = brcmnand_get_spare_size(host);
+ acc = nand_readreg(ctrl, acc_control_offs);
+ ecc_level = (acc & brcmnand_ecc_level_mask(ctrl)) >> ctrl->ecc_level_shift;
+ if (sector_size_1k)
+ chip->ecc.strength = ecc_level * 2;
+ else if (spare_area_size == 16 && ecc_level == 15)
+ chip->ecc.strength = 1; /* hamming */
+ else
+ chip->ecc.strength = ecc_level;
+
+ if (chip->ecc.size == 0) {
+ if (sector_size_1k)
+ chip->ecc.size = 1024;
+ else
+ chip->ecc.size = 512;
+ }
+}
+
/***********************************************************************
* CS_NAND_SELECT
***********************************************************************/
@@ -1084,8 +1151,8 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_host *host,
if ((val & mask) == expected_val)
return 0;
- dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
- expected_val, val & mask);
+ dev_err(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
+ expected_val, val & mask);
return -ETIMEDOUT;
}
@@ -1975,7 +2042,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
{
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
- int i, j, ret = 0;
+ int i, ret = 0;
brcmnand_clear_ecc_addr(ctrl);
@@ -1988,8 +2055,8 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
if (likely(buf)) {
brcmnand_soc_data_bus_prepare(ctrl->soc, false);
- for (j = 0; j < FC_WORDS; j++, buf++)
- *buf = brcmnand_read_fc(ctrl, j);
+ brcmnand_read_data_bus(ctrl, ctrl->nand_fc, buf, FC_WORDS);
+ buf += FC_WORDS;
brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
}
@@ -2137,7 +2204,7 @@ try_dmaread:
return err;
}
- dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
+ dev_err(ctrl->dev, "uncorrectable error at 0x%llx\n",
(unsigned long long)err_addr);
mtd->ecc_stats.failed++;
/* NAND layer expects zero on ECC errors */
@@ -2339,7 +2406,7 @@ static int brcmnand_write_oob_raw(struct nand_chip *chip, int page)
}
static int brcmnand_exec_instr(struct brcmnand_host *host, int i,
- const struct nand_operation *op)
+ const struct nand_operation *op)
{
const struct nand_op_instr *instr = &op->instrs[i];
struct brcmnand_controller *ctrl = host->ctrl;
@@ -2353,7 +2420,7 @@ static int brcmnand_exec_instr(struct brcmnand_host *host, int i,
* (WAITRDY excepted).
*/
last_op = ((i == (op->ninstrs - 1)) && (instr->type != NAND_OP_WAITRDY_INSTR)) ||
- ((i == (op->ninstrs - 2)) && (op->instrs[i+1].type == NAND_OP_WAITRDY_INSTR));
+ ((i == (op->ninstrs - 2)) && (op->instrs[i + 1].type == NAND_OP_WAITRDY_INSTR));
switch (instr->type) {
case NAND_OP_CMD_INSTR:
@@ -2398,10 +2465,10 @@ static int brcmnand_exec_instr(struct brcmnand_host *host, int i,
static int brcmnand_op_is_status(const struct nand_operation *op)
{
- if ((op->ninstrs == 2) &&
- (op->instrs[0].type == NAND_OP_CMD_INSTR) &&
- (op->instrs[0].ctx.cmd.opcode == NAND_CMD_STATUS) &&
- (op->instrs[1].type == NAND_OP_DATA_IN_INSTR))
+ if (op->ninstrs == 2 &&
+ op->instrs[0].type == NAND_OP_CMD_INSTR &&
+ op->instrs[0].ctx.cmd.opcode == NAND_CMD_STATUS &&
+ op->instrs[1].type == NAND_OP_DATA_IN_INSTR)
return 1;
return 0;
@@ -2409,10 +2476,10 @@ static int brcmnand_op_is_status(const struct nand_operation *op)
static int brcmnand_op_is_reset(const struct nand_operation *op)
{
- if ((op->ninstrs == 2) &&
- (op->instrs[0].type == NAND_OP_CMD_INSTR) &&
- (op->instrs[0].ctx.cmd.opcode == NAND_CMD_RESET) &&
- (op->instrs[1].type == NAND_OP_WAITRDY_INSTR))
+ if (op->ninstrs == 2 &&
+ op->instrs[0].type == NAND_OP_CMD_INSTR &&
+ op->instrs[0].ctx.cmd.opcode == NAND_CMD_RESET &&
+ op->instrs[1].type == NAND_OP_WAITRDY_INSTR)
return 1;
return 0;
@@ -2440,8 +2507,7 @@ static int brcmnand_exec_op(struct nand_chip *chip,
*status = ret & 0xFF;
return 0;
- }
- else if (brcmnand_op_is_reset(op)) {
+ } else if (brcmnand_op_is_reset(op)) {
ret = brcmnand_reset(host);
if (ret < 0)
return ret;
@@ -2612,19 +2678,37 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
nanddev_get_memorg(&chip->base);
struct brcmnand_controller *ctrl = host->ctrl;
struct brcmnand_cfg *cfg = &host->hwcfg;
- char msg[128];
+ struct device_node *np = nand_get_flash_node(chip);
u32 offs, tmp, oob_sector;
+ bool use_strap = false;
+ char msg[128];
int ret;
memset(cfg, 0, sizeof(*cfg));
+ use_strap = of_property_read_bool(np, "brcm,nand-ecc-use-strap");
- ret = of_property_read_u32(nand_get_flash_node(chip),
- "brcm,nand-oob-sector-size",
+ /*
+ * Either nand-ecc-xxx or brcm,nand-ecc-use-strap can be set. Error out
+ * if both exist.
+ */
+ if (chip->ecc.strength && use_strap) {
+ dev_err(ctrl->dev,
+ "ECC strap and DT ECC configuration properties are mutually exclusive\n");
+ return -EINVAL;
+ }
+
+ if (use_strap)
+ brcmnand_get_ecc_settings(host, chip);
+
+ ret = of_property_read_u32(np, "brcm,nand-oob-sector-size",
&oob_sector);
if (ret) {
- /* Use detected size */
- cfg->spare_area_size = mtd->oobsize /
- (mtd->writesize >> FC_SHIFT);
+ if (use_strap)
+ cfg->spare_area_size = brcmnand_get_spare_size(host);
+ else
+ /* Use detected size */
+ cfg->spare_area_size = mtd->oobsize /
+ (mtd->writesize >> FC_SHIFT);
} else {
cfg->spare_area_size = oob_sector;
}
@@ -3139,6 +3223,10 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
/* Disable XOR addressing */
brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
+ /* Check if the board connects the WP pin */
+ if (of_property_read_bool(dn, "brcm,wp-not-connected"))
+ wp_on = 0;
+
if (ctrl->features & BRCMNAND_HAS_WP) {
/* Permanently disable write protection */
if (wp_on == 2)
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.h b/drivers/mtd/nand/raw/brcmnand/brcmnand.h
index 928114c0b..9f171252a 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.h
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.h
@@ -24,6 +24,8 @@ struct brcmnand_soc {
void (*ctlrdy_set_enabled)(struct brcmnand_soc *soc, bool en);
void (*prepare_data_bus)(struct brcmnand_soc *soc, bool prepare,
bool is_param);
+ void (*read_data_bus)(struct brcmnand_soc *soc, void __iomem *flash_cache,
+ u32 *buffer, int fc_words);
const struct brcmnand_io_ops *ops;
};
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index 1e3a80f06..df6a0d5c8 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -869,7 +869,8 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
struct mtd_info *mtd;
if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
- return -ENODEV;
+ return dev_err_probe(&pdev->dev, -EPROBE_DEFER, "lbc_ctrl_dev missing\n");
+
lbc = fsl_lbc_ctrl_dev->regs;
dev = fsl_lbc_ctrl_dev->dev;
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 60198e33d..17477bb2d 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -1356,7 +1356,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
return -EINVAL;
}
- chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8),
+ chip = devm_kzalloc(dev, struct_size(chip, sels, nsels),
GFP_KERNEL);
if (!chip)
return -ENOMEM;
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 2479fa98f..d7dbbd469 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -3728,6 +3728,9 @@ read_retry:
}
nand_deselect_target(chip);
+ if (WARN_ON_ONCE(chip->cont_read.ongoing))
+ chip->cont_read.ongoing = false;
+
ops->retlen = ops->len - (size_t) readlen;
if (oob)
ops->oobretlen = ops->ooblen - oobreadlen;
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index e4664fa6f..a8fba5f39 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -576,7 +576,6 @@ static int search_bbt(struct nand_chip *this, uint8_t *buf,
startblock &= bbtblocks - 1;
} else {
chips = 1;
- bbtblocks = mtd->size >> this->bbt_erase_shift;
}
for (i = 0; i < chips; i++) {
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
index 9695f07b5..a74e64e0c 100644
--- a/drivers/mtd/nand/raw/nand_hynix.c
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -31,7 +31,6 @@ struct hynix_read_retry {
/**
* struct hynix_nand - private Hynix NAND struct
- * @nand_technology: manufacturing process expressed in picometer
* @read_retry: read-retry information
*/
struct hynix_nand {
@@ -402,7 +401,7 @@ static int hynix_nand_rr_init(struct nand_chip *chip)
if (ret)
pr_warn("failed to initialize read-retry infrastructure");
- return ret;
+ return 0;
}
static void hynix_nand_extract_oobsize(struct nand_chip *chip,
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 88811139a..264556939 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -37,7 +38,7 @@
#define FMC2_MAX_SG 16
/* Max chip enable */
-#define FMC2_MAX_CE 2
+#define FMC2_MAX_CE 4
/* Max ECC buffer length */
#define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
@@ -243,6 +244,13 @@ static inline struct stm32_fmc2_nand *to_fmc2_nand(struct nand_chip *chip)
return container_of(chip, struct stm32_fmc2_nand, chip);
}
+struct stm32_fmc2_nfc;
+
+struct stm32_fmc2_nfc_data {
+ int max_ncs;
+ int (*set_cdev)(struct stm32_fmc2_nfc *nfc);
+};
+
struct stm32_fmc2_nfc {
struct nand_controller base;
struct stm32_fmc2_nand nand;
@@ -256,6 +264,7 @@ struct stm32_fmc2_nfc {
phys_addr_t data_phys_addr[FMC2_MAX_CE];
struct clk *clk;
u8 irq_state;
+ const struct stm32_fmc2_nfc_data *data;
struct dma_chan *dma_tx_ch;
struct dma_chan *dma_rx_ch;
@@ -264,6 +273,8 @@ struct stm32_fmc2_nfc {
struct sg_table dma_ecc_sg;
u8 *ecc_buf;
int dma_ecc_len;
+ u32 tx_dma_max_burst;
+ u32 rx_dma_max_burst;
struct completion complete;
struct completion dma_data_complete;
@@ -347,20 +358,26 @@ static int stm32_fmc2_nfc_select_chip(struct nand_chip *chip, int chipnr)
stm32_fmc2_nfc_setup(chip);
stm32_fmc2_nfc_timings_init(chip);
- if (nfc->dma_tx_ch && nfc->dma_rx_ch) {
+ if (nfc->dma_tx_ch) {
memset(&dma_cfg, 0, sizeof(dma_cfg));
- dma_cfg.src_addr = nfc->data_phys_addr[nfc->cs_sel];
dma_cfg.dst_addr = nfc->data_phys_addr[nfc->cs_sel];
- dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_cfg.src_maxburst = 32;
- dma_cfg.dst_maxburst = 32;
+ dma_cfg.dst_maxburst = nfc->tx_dma_max_burst /
+ dma_cfg.dst_addr_width;
ret = dmaengine_slave_config(nfc->dma_tx_ch, &dma_cfg);
if (ret) {
dev_err(nfc->dev, "tx DMA engine slave config failed\n");
return ret;
}
+ }
+
+ if (nfc->dma_rx_ch) {
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+ dma_cfg.src_addr = nfc->data_phys_addr[nfc->cs_sel];
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_cfg.src_maxburst = nfc->rx_dma_max_burst /
+ dma_cfg.src_addr_width;
ret = dmaengine_slave_config(nfc->dma_rx_ch, &dma_cfg);
if (ret) {
@@ -1545,6 +1562,7 @@ static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
{
+ struct dma_slave_caps caps;
int ret = 0;
nfc->dma_tx_ch = dma_request_chan(nfc->dev, "tx");
@@ -1557,6 +1575,11 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
goto err_dma;
}
+ ret = dma_get_slave_caps(nfc->dma_tx_ch, &caps);
+ if (ret)
+ return ret;
+ nfc->tx_dma_max_burst = caps.max_burst;
+
nfc->dma_rx_ch = dma_request_chan(nfc->dev, "rx");
if (IS_ERR(nfc->dma_rx_ch)) {
ret = PTR_ERR(nfc->dma_rx_ch);
@@ -1567,6 +1590,11 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
goto err_dma;
}
+ ret = dma_get_slave_caps(nfc->dma_rx_ch, &caps);
+ if (ret)
+ return ret;
+ nfc->rx_dma_max_burst = caps.max_burst;
+
nfc->dma_ecc_ch = dma_request_chan(nfc->dev, "ecc");
if (IS_ERR(nfc->dma_ecc_ch)) {
ret = PTR_ERR(nfc->dma_ecc_ch);
@@ -1790,7 +1818,7 @@ static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
return ret;
}
- if (cs >= FMC2_MAX_CE) {
+ if (cs >= nfc->data->max_ncs) {
dev_err(nfc->dev, "invalid reg value: %d\n", cs);
return -EINVAL;
}
@@ -1896,9 +1924,17 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
nand_controller_init(&nfc->base);
nfc->base.ops = &stm32_fmc2_nfc_controller_ops;
- ret = stm32_fmc2_nfc_set_cdev(nfc);
- if (ret)
- return ret;
+ nfc->data = of_device_get_match_data(dev);
+ if (!nfc->data)
+ return -EINVAL;
+
+ if (nfc->data->set_cdev) {
+ ret = nfc->data->set_cdev(nfc);
+ if (ret)
+ return ret;
+ } else {
+ nfc->cdev = dev->parent;
+ }
ret = stm32_fmc2_nfc_parse_dt(nfc);
if (ret)
@@ -1917,7 +1953,7 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
if (nfc->dev == nfc->cdev)
start_region = 1;
- for (chip_cs = 0, mem_region = start_region; chip_cs < FMC2_MAX_CE;
+ for (chip_cs = 0, mem_region = start_region; chip_cs < nfc->data->max_ncs;
chip_cs++, mem_region += 3) {
if (!(nfc->cs_assigned & BIT(chip_cs)))
continue;
@@ -2073,7 +2109,7 @@ static int __maybe_unused stm32_fmc2_nfc_resume(struct device *dev)
stm32_fmc2_nfc_wp_disable(nand);
- for (chip_cs = 0; chip_cs < FMC2_MAX_CE; chip_cs++) {
+ for (chip_cs = 0; chip_cs < nfc->data->max_ncs; chip_cs++) {
if (!(nfc->cs_assigned & BIT(chip_cs)))
continue;
@@ -2086,9 +2122,28 @@ static int __maybe_unused stm32_fmc2_nfc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(stm32_fmc2_nfc_pm_ops, stm32_fmc2_nfc_suspend,
stm32_fmc2_nfc_resume);
+static const struct stm32_fmc2_nfc_data stm32_fmc2_nfc_mp1_data = {
+ .max_ncs = 2,
+ .set_cdev = stm32_fmc2_nfc_set_cdev,
+};
+
+static const struct stm32_fmc2_nfc_data stm32_fmc2_nfc_mp25_data = {
+ .max_ncs = 4,
+};
+
static const struct of_device_id stm32_fmc2_nfc_match[] = {
- {.compatible = "st,stm32mp15-fmc2"},
- {.compatible = "st,stm32mp1-fmc2-nfc"},
+ {
+ .compatible = "st,stm32mp15-fmc2",
+ .data = &stm32_fmc2_nfc_mp1_data,
+ },
+ {
+ .compatible = "st,stm32mp1-fmc2-nfc",
+ .data = &stm32_fmc2_nfc_mp1_data,
+ },
+ {
+ .compatible = "st,stm32mp25-fmc2-nfc",
+ .data = &stm32_fmc2_nfc_mp25_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, stm32_fmc2_nfc_match);
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index 1a473021c..ba7c813b9 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -15,6 +15,8 @@
#define WINBOND_CFG_BUF_READ BIT(3)
+#define W25N04KV_STATUS_ECC_5_8_BITFLIPS (3 << 4)
+
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -118,6 +120,7 @@ static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
return -EBADMSG;
case STATUS_ECC_HAS_BITFLIPS:
+ case W25N04KV_STATUS_ECC_5_8_BITFLIPS:
/*
* Let's try to retrieve the real maximum number of bitflips
* in order to avoid forcing the wear-leveling layer to move
@@ -214,6 +217,15 @@ static const struct spinand_info winbond_spinand_table[] = {
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25m02gv_ooblayout, w25n02kv_ecc_get_status)),
+ SPINAND_INFO("W25N04KV",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x23),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 40, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
};
static int winbond_spinand_init(struct spinand_device *spinand)
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 4129764fa..3e1f19135 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -1158,7 +1158,7 @@ static u8 spi_nor_convert_3to4_erase(u8 opcode)
static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
{
- return !!nor->params->erase_map.uniform_erase_type;
+ return !!nor->params->erase_map.uniform_region.erase_mask;
}
static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
@@ -1542,7 +1542,6 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
const struct spi_nor_erase_type *erase;
u32 rem;
int i;
- u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
/*
* Erase types are ordered by size, with the smallest erase type at
@@ -1550,7 +1549,7 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
*/
for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
/* Does the erase region support the tested erase type? */
- if (!(erase_mask & BIT(i)))
+ if (!(region->erase_mask & BIT(i)))
continue;
erase = &map->erase_type[i];
@@ -1558,8 +1557,7 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
continue;
/* Alignment is not mandatory for overlaid regions */
- if (region->offset & SNOR_OVERLAID_REGION &&
- region->size <= len)
+ if (region->overlaid && region->size <= len)
return erase;
/* Don't erase more than what the user has asked for. */
@@ -1574,59 +1572,6 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
return NULL;
}
-static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
-{
- return region->offset & SNOR_LAST_REGION;
-}
-
-static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
-{
- return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
-}
-
-/**
- * spi_nor_region_next() - get the next spi nor region
- * @region: pointer to a structure that describes a SPI NOR erase region
- *
- * Return: the next spi nor region or NULL if last region.
- */
-struct spi_nor_erase_region *
-spi_nor_region_next(struct spi_nor_erase_region *region)
-{
- if (spi_nor_region_is_last(region))
- return NULL;
- region++;
- return region;
-}
-
-/**
- * spi_nor_find_erase_region() - find the region of the serial flash memory in
- * which the offset fits
- * @map: the erase map of the SPI NOR
- * @addr: offset in the serial flash memory
- *
- * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
- * otherwise.
- */
-static struct spi_nor_erase_region *
-spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
-{
- struct spi_nor_erase_region *region = map->regions;
- u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
- u64 region_end = region_start + region->size;
-
- while (addr < region_start || addr >= region_end) {
- region = spi_nor_region_next(region);
- if (!region)
- return ERR_PTR(-EINVAL);
-
- region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
- region_end = region_start + region->size;
- }
-
- return region;
-}
-
/**
* spi_nor_init_erase_cmd() - initialize an erase command
* @region: pointer to a structure that describes a SPI NOR erase region
@@ -1649,7 +1594,7 @@ spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
cmd->opcode = erase->opcode;
cmd->count = 1;
- if (region->offset & SNOR_OVERLAID_REGION)
+ if (region->overlaid)
cmd->size = region->size;
else
cmd->size = erase->size;
@@ -1693,44 +1638,36 @@ static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
struct spi_nor_erase_region *region;
struct spi_nor_erase_command *cmd = NULL;
u64 region_end;
+ unsigned int i;
int ret = -EINVAL;
- region = spi_nor_find_erase_region(map, addr);
- if (IS_ERR(region))
- return PTR_ERR(region);
-
- region_end = spi_nor_region_end(region);
+ for (i = 0; i < map->n_regions && len; i++) {
+ region = &map->regions[i];
+ region_end = region->offset + region->size;
- while (len) {
- erase = spi_nor_find_best_erase_type(map, region, addr, len);
- if (!erase)
- goto destroy_erase_cmd_list;
-
- if (prev_erase != erase ||
- erase->size != cmd->size ||
- region->offset & SNOR_OVERLAID_REGION) {
- cmd = spi_nor_init_erase_cmd(region, erase);
- if (IS_ERR(cmd)) {
- ret = PTR_ERR(cmd);
+ while (len && addr >= region->offset && addr < region_end) {
+ erase = spi_nor_find_best_erase_type(map, region, addr,
+ len);
+ if (!erase)
goto destroy_erase_cmd_list;
- }
-
- list_add_tail(&cmd->list, erase_list);
- } else {
- cmd->count++;
- }
- addr += cmd->size;
- len -= cmd->size;
+ if (prev_erase != erase || erase->size != cmd->size ||
+ region->overlaid) {
+ cmd = spi_nor_init_erase_cmd(region, erase);
+ if (IS_ERR(cmd)) {
+ ret = PTR_ERR(cmd);
+ goto destroy_erase_cmd_list;
+ }
+
+ list_add_tail(&cmd->list, erase_list);
+ } else {
+ cmd->count++;
+ }
- if (len && addr >= region_end) {
- region = spi_nor_region_next(region);
- if (!region)
- goto destroy_erase_cmd_list;
- region_end = spi_nor_region_end(region);
+ len -= cmd->size;
+ addr += cmd->size;
+ prev_erase = erase;
}
-
- prev_erase = erase;
}
return 0;
@@ -2468,12 +2405,11 @@ void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase)
void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
u8 erase_mask, u64 flash_size)
{
- /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
- map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
- SNOR_LAST_REGION;
+ map->uniform_region.offset = 0;
map->uniform_region.size = flash_size;
+ map->uniform_region.erase_mask = erase_mask;
map->regions = &map->uniform_region;
- map->uniform_erase_type = erase_mask;
+ map->n_regions = 1;
}
int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
@@ -2560,7 +2496,7 @@ spi_nor_select_uniform_erase(struct spi_nor_erase_map *map)
{
const struct spi_nor_erase_type *tested_erase, *erase = NULL;
int i;
- u8 uniform_erase_type = map->uniform_erase_type;
+ u8 uniform_erase_type = map->uniform_region.erase_mask;
/*
* Search for the biggest erase size, except for when compiled
@@ -2599,8 +2535,7 @@ spi_nor_select_uniform_erase(struct spi_nor_erase_map *map)
return NULL;
/* Disable all other Sector Erase commands. */
- map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
- map->uniform_erase_type |= BIT(erase - map->erase_type);
+ map->uniform_region.erase_mask = BIT(erase - map->erase_type);
return erase;
}
@@ -3434,7 +3369,54 @@ static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
return info;
}
-static void spi_nor_set_mtd_info(struct spi_nor *nor)
+static u32
+spi_nor_get_region_erasesize(const struct spi_nor_erase_region *region,
+ const struct spi_nor_erase_type *erase_type)
+{
+ int i;
+
+ if (region->overlaid)
+ return region->size;
+
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ if (region->erase_mask & BIT(i))
+ return erase_type[i].size;
+ }
+
+ return 0;
+}
+
+static int spi_nor_set_mtd_eraseregions(struct spi_nor *nor)
+{
+ const struct spi_nor_erase_map *map = &nor->params->erase_map;
+ const struct spi_nor_erase_region *region = map->regions;
+ struct mtd_erase_region_info *mtd_region;
+ struct mtd_info *mtd = &nor->mtd;
+ u32 erasesize, i;
+
+ mtd_region = devm_kcalloc(nor->dev, map->n_regions, sizeof(*mtd_region),
+ GFP_KERNEL);
+ if (!mtd_region)
+ return -ENOMEM;
+
+ for (i = 0; i < map->n_regions; i++) {
+ erasesize = spi_nor_get_region_erasesize(&region[i],
+ map->erase_type);
+ if (!erasesize)
+ return -EINVAL;
+
+ mtd_region[i].erasesize = erasesize;
+ mtd_region[i].numblocks = div64_ul(region[i].size, erasesize);
+ mtd_region[i].offset = region[i].offset;
+ }
+
+ mtd->numeraseregions = map->n_regions;
+ mtd->eraseregions = mtd_region;
+
+ return 0;
+}
+
+static int spi_nor_set_mtd_info(struct spi_nor *nor)
{
struct mtd_info *mtd = &nor->mtd;
struct device *dev = nor->dev;
@@ -3465,6 +3447,11 @@ static void spi_nor_set_mtd_info(struct spi_nor *nor)
mtd->_resume = spi_nor_resume;
mtd->_get_device = spi_nor_get_device;
mtd->_put_device = spi_nor_put_device;
+
+ if (!spi_nor_has_uniform_erase(nor))
+ return spi_nor_set_mtd_eraseregions(nor);
+
+ return 0;
}
static int spi_nor_hw_reset(struct spi_nor *nor)
@@ -3555,7 +3542,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
return ret;
/* No mtd_info fields should be used up to this point. */
- spi_nor_set_mtd_info(nor);
+ ret = spi_nor_set_mtd_info(nor);
+ if (ret)
+ return ret;
dev_dbg(dev, "Manufacturer and device ID: %*phN\n",
SPI_NOR_MAX_ID_LEN, nor->id);
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index d36c0e072..442786685 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -240,27 +240,21 @@ struct spi_nor_erase_command {
/**
* struct spi_nor_erase_region - Structure to describe a SPI NOR erase region
* @offset: the offset in the data array of erase region start.
- * LSB bits are used as a bitmask encoding flags to
- * determine if this region is overlaid, if this region is
- * the last in the SPI NOR flash memory and to indicate
- * all the supported erase commands inside this region.
- * The erase types are sorted in ascending order with the
- * smallest Erase Type size being at BIT(0).
* @size: the size of the region in bytes.
+ * @erase_mask: bitmask to indicate all the supported erase commands
+ * inside this region. The erase types are sorted in
+ * ascending order with the smallest Erase Type size being
+ * at BIT(0).
+ * @overlaid: determine if this region is overlaid.
*/
struct spi_nor_erase_region {
u64 offset;
u64 size;
+ u8 erase_mask;
+ bool overlaid;
};
#define SNOR_ERASE_TYPE_MAX 4
-#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0)
-
-#define SNOR_LAST_REGION BIT(4)
-#define SNOR_OVERLAID_REGION BIT(5)
-
-#define SNOR_ERASE_FLAGS_MAX 6
-#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0)
/**
* struct spi_nor_erase_map - Structure to describe the SPI NOR erase map
@@ -273,17 +267,13 @@ struct spi_nor_erase_region {
* The erase types are sorted in ascending order, with the
* smallest Erase Type size being the first member in the
* erase_type array.
- * @uniform_erase_type: bitmask encoding erase types that can erase the
- * entire memory. This member is completed at init by
- * uniform and non-uniform SPI NOR flash memories if they
- * support at least one erase type that can erase the
- * entire memory.
+ * @n_regions: number of erase regions.
*/
struct spi_nor_erase_map {
struct spi_nor_erase_region *regions;
struct spi_nor_erase_region uniform_region;
struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX];
- u8 uniform_erase_type;
+ unsigned int n_regions;
};
/**
@@ -675,8 +665,6 @@ void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
u8 opcode);
void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase);
-struct spi_nor_erase_region *
-spi_nor_region_next(struct spi_nor_erase_region *region);
void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
u8 erase_mask, u64 flash_size);
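
Note: with the flag bits no longer packed into the offset field, an erase region is now described by plain fields. A hypothetical two-region layout using the new struct, with made-up sizes and erase types (illustrative only, not taken from this patch):

/* Sketch of the reworked spi_nor_erase_region layout (illustrative values). */
#include <linux/bits.h>
#include <linux/sizes.h>
#include "core.h"	/* drivers/mtd/spi-nor/core.h, where the struct is defined */

static struct spi_nor_erase_region example_regions[] = {
	{
		/* small parameter sectors at the bottom of the array */
		.offset     = 0,
		.size       = SZ_64K,
		.erase_mask = BIT(0),		/* smallest erase type only */
	},
	{
		/* the remainder supports the smallest and a larger erase type */
		.offset     = SZ_64K,
		.size       = SZ_16M - SZ_64K,
		.erase_mask = BIT(0) | BIT(2),
	},
};

A map would then point at such an array via map->regions and record the count in map->n_regions, mirroring what the SMPT parsing in sfdp.c does further below.
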
diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c
index 2dbda6b69..fa6956144 100644
--- a/drivers/mtd/spi-nor/debugfs.c
+++ b/drivers/mtd/spi-nor/debugfs.c
@@ -78,10 +78,10 @@ static int spi_nor_params_show(struct seq_file *s, void *data)
struct spi_nor *nor = s->private;
struct spi_nor_flash_parameter *params = nor->params;
struct spi_nor_erase_map *erase_map = &params->erase_map;
- struct spi_nor_erase_region *region;
+ struct spi_nor_erase_region *region = erase_map->regions;
const struct flash_info *info = nor->info;
char buf[16], *str;
- int i;
+ unsigned int i;
seq_printf(s, "name\t\t%s\n", info->name);
seq_printf(s, "id\t\t%*ph\n", SPI_NOR_MAX_ID_LEN, nor->id);
@@ -142,22 +142,20 @@ static int spi_nor_params_show(struct seq_file *s, void *data)
}
seq_puts(s, "\nsector map\n");
- seq_puts(s, " region (in hex) | erase mask | flags\n");
+ seq_puts(s, " region (in hex) | erase mask | overlaid\n");
seq_puts(s, " ------------------+------------+----------\n");
- for (region = erase_map->regions;
- region;
- region = spi_nor_region_next(region)) {
- u64 start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
- u64 flags = region->offset & SNOR_ERASE_FLAGS_MASK;
- u64 end = start + region->size - 1;
+ for (i = 0; i < erase_map->n_regions; i++) {
+ u64 start = region[i].offset;
+ u64 end = start + region[i].size - 1;
+ u8 erase_mask = region[i].erase_mask;
seq_printf(s, " %08llx-%08llx | [%c%c%c%c] | %s\n",
start, end,
- flags & BIT(0) ? '0' : ' ',
- flags & BIT(1) ? '1' : ' ',
- flags & BIT(2) ? '2' : ' ',
- flags & BIT(3) ? '3' : ' ',
- flags & SNOR_OVERLAID_REGION ? "overlaid" : "");
+ erase_mask & BIT(0) ? '0' : ' ',
+ erase_mask & BIT(1) ? '1' : ' ',
+ erase_mask & BIT(2) ? '2' : ' ',
+ erase_mask & BIT(3) ? '3' : ' ',
+ region[i].overlaid ? "yes" : "no");
}
return 0;
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
index 57713de32..5b1117265 100644
--- a/drivers/mtd/spi-nor/sfdp.c
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -389,19 +389,15 @@ static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
{
struct spi_nor_erase_region *region = map->regions;
- u8 region_erase_mask, sorted_erase_mask;
+ u8 sorted_erase_mask;
+ unsigned int i;
- while (region) {
- region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
-
- sorted_erase_mask = spi_nor_sort_erase_mask(map,
- region_erase_mask);
+ for (i = 0; i < map->n_regions; i++) {
+ sorted_erase_mask =
+ spi_nor_sort_erase_mask(map, region[i].erase_mask);
/* Overwrite erase mask. */
- region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
- sorted_erase_mask;
-
- region = spi_nor_region_next(region);
+ region[i].erase_mask = sorted_erase_mask;
}
}
@@ -554,8 +550,6 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
* selecting the uniform erase.
*/
spi_nor_regions_sort_erase_types(map);
- map->uniform_erase_type = map->uniform_region.offset &
- SNOR_ERASE_TYPE_MASK;
/* Stop here if not JESD216 rev A or later. */
if (bfpt_header->length == BFPT_DWORD_MAX_JESD216)
@@ -806,16 +800,6 @@ out:
return ret;
}
-static void spi_nor_region_mark_end(struct spi_nor_erase_region *region)
-{
- region->offset |= SNOR_LAST_REGION;
-}
-
-static void spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
-{
- region->offset |= SNOR_OVERLAID_REGION;
-}
-
/**
* spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
* @region: pointer to a structure that describes a SPI NOR erase region
@@ -833,7 +817,7 @@ spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
if (!(erase[i].size && erase_type & BIT(erase[i].idx)))
continue;
if (region->size & erase[i].size_mask) {
- spi_nor_region_mark_overlay(region);
+ region->overlaid = true;
return;
}
}
@@ -868,6 +852,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
if (!region)
return -ENOMEM;
map->regions = region;
+ map->n_regions = region_count;
uniform_erase_type = 0xff;
regions_erase_type = 0;
@@ -875,9 +860,10 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
/* Populate regions. */
for (i = 0; i < region_count; i++) {
j = i + 1; /* index for the region dword */
+ region[i].offset = offset;
region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
- region[i].offset = offset | erase_type;
+ region[i].erase_mask = erase_type;
spi_nor_region_check_overlay(&region[i], erase, erase_type);
@@ -893,21 +879,20 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
*/
regions_erase_type |= erase_type;
- offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
- region[i].size;
+ offset = region[i].offset + region[i].size;
}
- spi_nor_region_mark_end(&region[i - 1]);
- save_uniform_erase_type = map->uniform_erase_type;
- map->uniform_erase_type = spi_nor_sort_erase_mask(map,
- uniform_erase_type);
+ save_uniform_erase_type = map->uniform_region.erase_mask;
+ map->uniform_region.erase_mask =
+ spi_nor_sort_erase_mask(map,
+ uniform_erase_type);
if (!regions_erase_type) {
/*
* Roll back to the previous uniform_erase_type mask, SMPT is
* broken.
*/
- map->uniform_erase_type = save_uniform_erase_type;
+ map->uniform_region.erase_mask = save_uniform_erase_type;
return -EINVAL;
}
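
With the core.h and sfdp.c changes above, the erase-type mask and the overlaid flag live in dedicated struct members and the region count is kept in n_regions, so the map is walked by index instead of chasing spi_nor_region_next() and masking flag bits out of the offset. A small standalone C sketch of that iteration pattern, using simplified stand-in structures and made-up region values rather than the real spi-nor types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified model of the reworked erase-map structures. */
struct erase_region {
	uint64_t offset;	/* region start, no flag bits packed in */
	uint64_t size;
	uint8_t  erase_mask;	/* bit i set: erase type i usable here */
	bool     overlaid;
};

struct erase_map {
	struct erase_region *regions;
	unsigned int n_regions;	/* replaces the SNOR_LAST_REGION end marker */
};

static void dump_map(const struct erase_map *map)
{
	for (unsigned int i = 0; i < map->n_regions; i++) {
		const struct erase_region *r = &map->regions[i];

		printf("%08llx-%08llx | mask=%#x | overlaid=%s\n",
		       (unsigned long long)r->offset,
		       (unsigned long long)(r->offset + r->size - 1),
		       (unsigned int)r->erase_mask,
		       r->overlaid ? "yes" : "no");
	}
}

int main(void)
{
	/* Hypothetical two-region layout, for illustration only. */
	struct erase_region regions[] = {
		{ .offset = 0x000000, .size = 0x10000, .erase_mask = 0x3 },
		{ .offset = 0x010000, .size = 0xf0000, .erase_mask = 0xf },
	};
	struct erase_map map = { .regions = regions, .n_regions = 2 };

	dump_map(&map);
	return 0;
}

Because no flags are packed into the offset any more, region boundaries can be used directly in arithmetic, which is what lets the debugfs.c and sfdp.c hunks above drop the SNOR_ERASE_FLAGS_MASK masking.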
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 211f279a3..46c01fa2e 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -295,7 +295,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
if (cis_sector == -1)
return;
- ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL);
+ ssfdc = kzalloc(sizeof(*ssfdc), GFP_KERNEL);
if (!ssfdc)
return;
@@ -332,7 +332,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
kmalloc_array(ssfdc->map_len,
sizeof(ssfdc->logic_block_map[0]), GFP_KERNEL);
if (!ssfdc->logic_block_map)
- goto out_err;
+ goto out_free_ssfdc;
memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) *
ssfdc->map_len);
@@ -350,7 +350,8 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
out_err:
kfree(ssfdc->logic_block_map);
- kfree(ssfdc);
+out_free_ssfdc:
+ kfree(ssfdc);
}
static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev)
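
The ssfdc.c hunk above splits the error path so that a failed logic_block_map allocation jumps to a label that only frees the ssfdc record itself, instead of also kfree()ing a map pointer that was never assigned. As a hedged, self-contained illustration of that staged goto-unwind pattern (generic userspace code, not the driver itself):

#include <stdlib.h>

struct rec {
	int *map;
	int *cache;
};

static struct rec *setup(size_t n)
{
	struct rec *r = calloc(1, sizeof(*r));

	if (!r)
		return NULL;

	r->map = calloc(n, sizeof(*r->map));
	if (!r->map)
		goto out_free_rec;	/* nothing else allocated yet */

	r->cache = calloc(n, sizeof(*r->cache));
	if (!r->cache)
		goto out_free_map;	/* undo only what already exists */

	return r;

out_free_map:
	free(r->map);
out_free_rec:
	free(r);
	return NULL;
}

int main(void)
{
	struct rec *r = setup(16);

	if (r) {
		free(r->cache);
		free(r->map);
		free(r);
	}
	return 0;
}

Each label undoes exactly the allocations made before the corresponding failure point, which is the invariant the ssfdc fix restores.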
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 7499a5401..e28a3af83 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -113,4 +113,17 @@ config MTD_UBI_FAULT_INJECTION
testing purposes.
If in doubt, say "N".
+
+config MTD_UBI_NVMEM
+ tristate "UBI virtual NVMEM"
+ default n
+ depends on NVMEM
+ help
+ This option enables an additional driver that exposes UBI volumes as
+ NVMEM providers, intended for platforms where UBI is part of the
+ firmware specification and is also used to store e.g. MAC addresses
+ or board-specific Wi-Fi calibration data.
+
+ If in doubt, say "N".
+
endif # MTD_UBI
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index 543673605..4b51aaf00 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -7,3 +7,4 @@ ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
ubi-$(CONFIG_MTD_UBI_BLOCK) += block.o
obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
+obj-$(CONFIG_MTD_UBI_NVMEM) += nvmem.o
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 654bd7372..f82e3423a 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -65,10 +65,10 @@ struct ubiblock_pdu {
};
/* Numbers of elements set in the @ubiblock_param array */
-static int ubiblock_devs __initdata;
+static int ubiblock_devs;
/* MTD devices specification parameters */
-static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;
+static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES];
struct ubiblock {
struct ubi_volume_desc *desc;
@@ -348,6 +348,9 @@ static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
int ubiblock_create(struct ubi_volume_info *vi)
{
+ struct queue_limits lim = {
+ .max_segments = UBI_MAX_SG_COUNT,
+ };
struct ubiblock *dev;
struct gendisk *gd;
u64 disk_capacity;
@@ -393,7 +396,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
/* Initialize the gendisk of this ubiblock device */
- gd = blk_mq_alloc_disk(&dev->tag_set, dev);
+ gd = blk_mq_alloc_disk(&dev->tag_set, &lim, dev);
if (IS_ERR(gd)) {
ret = PTR_ERR(gd);
goto out_free_tags;
@@ -416,7 +419,6 @@ int ubiblock_create(struct ubi_volume_info *vi)
dev->gd = gd;
dev->rq = gd->queue;
- blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
list_add_tail(&dev->list, &ubiblock_devices);
@@ -534,6 +536,70 @@ static int ubiblock_resize(struct ubi_volume_info *vi)
return 0;
}
+static bool
+match_volume_desc(struct ubi_volume_info *vi, const char *name, int ubi_num, int vol_id)
+{
+ int err, len, cur_ubi_num, cur_vol_id;
+
+ if (ubi_num == -1) {
+ /* No ubi num, name must be a vol device path */
+ err = ubi_get_num_by_path(name, &cur_ubi_num, &cur_vol_id);
+ if (err || vi->ubi_num != cur_ubi_num || vi->vol_id != cur_vol_id)
+ return false;
+
+ return true;
+ }
+
+ if (vol_id == -1) {
+ /* Got ubi_num, but no vol_id, name must be volume name */
+ if (vi->ubi_num != ubi_num)
+ return false;
+
+ len = strnlen(name, UBI_VOL_NAME_MAX + 1);
+ if (len < 1 || vi->name_len != len)
+ return false;
+
+ if (strcmp(name, vi->name))
+ return false;
+
+ return true;
+ }
+
+ if (vi->ubi_num != ubi_num)
+ return false;
+
+ if (vi->vol_id != vol_id)
+ return false;
+
+ return true;
+}
+
+static void
+ubiblock_create_from_param(struct ubi_volume_info *vi)
+{
+ int i, ret = 0;
+ struct ubiblock_param *p;
+
+ /*
+ * Iterate over the ubiblock cmdline parameters. If a parameter matches
+ * the newly added volume, create the ubiblock device for it.
+ */
+ for (i = 0; i < ubiblock_devs; i++) {
+ p = &ubiblock_param[i];
+
+ if (!match_volume_desc(vi, p->name, p->ubi_num, p->vol_id))
+ continue;
+
+ ret = ubiblock_create(vi);
+ if (ret) {
+ pr_err(
+ "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
+ vi->name, p->ubi_num, p->vol_id, ret);
+ }
+ break;
+ }
+}
+
static int ubiblock_notify(struct notifier_block *nb,
unsigned long notification_type, void *ns_ptr)
{
@@ -541,10 +607,7 @@ static int ubiblock_notify(struct notifier_block *nb,
switch (notification_type) {
case UBI_VOLUME_ADDED:
- /*
- * We want to enforce explicit block device creation for
- * volumes, so when a volume is added we do nothing.
- */
+ ubiblock_create_from_param(&nt->vi);
break;
case UBI_VOLUME_REMOVED:
ubiblock_remove(&nt->vi);
@@ -570,56 +633,6 @@ static struct notifier_block ubiblock_notifier = {
.notifier_call = ubiblock_notify,
};
-static struct ubi_volume_desc * __init
-open_volume_desc(const char *name, int ubi_num, int vol_id)
-{
- if (ubi_num == -1)
- /* No ubi num, name must be a vol device path */
- return ubi_open_volume_path(name, UBI_READONLY);
- else if (vol_id == -1)
- /* No vol_id, must be vol_name */
- return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
- else
- return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
-}
-
-static void __init ubiblock_create_from_param(void)
-{
- int i, ret = 0;
- struct ubiblock_param *p;
- struct ubi_volume_desc *desc;
- struct ubi_volume_info vi;
-
- /*
- * If there is an error creating one of the ubiblocks, continue on to
- * create the following ubiblocks. This helps in a circumstance where
- * the kernel command-line specifies multiple block devices and some
- * may be broken, but we still want the working ones to come up.
- */
- for (i = 0; i < ubiblock_devs; i++) {
- p = &ubiblock_param[i];
-
- desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
- if (IS_ERR(desc)) {
- pr_err(
- "UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
- p->ubi_num, p->vol_id, PTR_ERR(desc));
- continue;
- }
-
- ubi_get_volume_info(desc, &vi);
- ubi_close_volume(desc);
-
- ret = ubiblock_create(&vi);
- if (ret) {
- pr_err(
- "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
- vi.name, p->ubi_num, p->vol_id, ret);
- continue;
- }
- }
-}
-
static void ubiblock_remove_all(void)
{
struct ubiblock *next;
@@ -645,18 +658,7 @@ int __init ubiblock_init(void)
if (ubiblock_major < 0)
return ubiblock_major;
- /*
- * Attach block devices from 'block=' module param.
- * Even if one block device in the param list fails to come up,
- * still allow the module to load and leave any others up.
- */
- ubiblock_create_from_param();
-
- /*
- * Block devices are only created upon user requests, so we ignore
- * existing volumes.
- */
- ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
+ ret = ubi_register_volume_notifier(&ubiblock_notifier, 0);
if (ret)
goto err_unreg;
return 0;
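
The block.c rework above stops opening volumes eagerly from ubiblock_init(); instead, the saved 'block=' parameters are compared against each UBI_VOLUME_ADDED notification and the block device is created lazily when a matching volume appears. A rough userspace model of that deferred matching, with hypothetical stand-in types rather than the kernel structures (the device-path form handled by ubi_get_num_by_path() is omitted here):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical, simplified stand-ins for the kernel structures. */
struct vol_info  { int ubi_num; int vol_id; const char *name; };
struct blk_param { int ubi_num; int vol_id; const char *name; };

static struct blk_param params[] = {
	{ .ubi_num = 0, .vol_id = -1, .name = "rootfs" },	/* match by name */
	{ .ubi_num = 1, .vol_id = 3,  .name = "" },		/* match by id   */
};

static bool param_matches(const struct blk_param *p, const struct vol_info *vi)
{
	if (p->vol_id == -1)
		return p->ubi_num == vi->ubi_num && !strcmp(p->name, vi->name);
	return p->ubi_num == vi->ubi_num && p->vol_id == vi->vol_id;
}

/* Called from the (modelled) UBI_VOLUME_ADDED notification. */
static void volume_added(const struct vol_info *vi)
{
	for (size_t i = 0; i < sizeof(params) / sizeof(params[0]); i++) {
		if (!param_matches(&params[i], vi))
			continue;
		printf("create ubiblock for ubi%d_%d (%s)\n",
		       vi->ubi_num, vi->vol_id, vi->name);
		break;
	}
}

int main(void)
{
	struct vol_info vi = { .ubi_num = 0, .vol_id = 5, .name = "rootfs" };

	volume_added(&vi);	/* matched by name, created lazily */
	return 0;
}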
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 7d4ff1193..a7e3a6246 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -27,6 +27,7 @@
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/slab.h>
#include <linux/major.h>
#include "ubi.h"
@@ -92,7 +93,7 @@ static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);
-/* Protects @ubi_devices and @ubi->ref_count */
+/* Protects @ubi_devices, @ubi->ref_count and @ubi->is_dead */
static DEFINE_SPINLOCK(ubi_devices_lock);
/* "Show" method for files in '/<sysfs>/class/ubi/' */
@@ -260,6 +261,9 @@ struct ubi_device *ubi_get_device(int ubi_num)
spin_lock(&ubi_devices_lock);
ubi = ubi_devices[ubi_num];
+ if (ubi && ubi->is_dead)
+ ubi = NULL;
+
if (ubi) {
ubi_assert(ubi->ref_count >= 0);
ubi->ref_count += 1;
@@ -297,7 +301,7 @@ struct ubi_device *ubi_get_by_major(int major)
spin_lock(&ubi_devices_lock);
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
- if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ if (ubi && !ubi->is_dead && MAJOR(ubi->cdev.dev) == major) {
ubi_assert(ubi->ref_count >= 0);
ubi->ref_count += 1;
get_device(&ubi->dev);
@@ -326,7 +330,7 @@ int ubi_major2num(int major)
for (i = 0; i < UBI_MAX_DEVICES; i++) {
struct ubi_device *ubi = ubi_devices[i];
- if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ if (ubi && !ubi->is_dead && MAJOR(ubi->cdev.dev) == major) {
ubi_num = ubi->ubi_num;
break;
}
@@ -513,7 +517,7 @@ static void ubi_free_volumes_from(struct ubi_device *ubi, int from)
int i;
for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
- if (!ubi->volumes[i])
+ if (!ubi->volumes[i] || ubi->volumes[i]->is_dead)
continue;
ubi_eba_replace_table(ubi->volumes[i], NULL);
ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
@@ -1098,7 +1102,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
return -EINVAL;
spin_lock(&ubi_devices_lock);
- put_device(&ubi->dev);
ubi->ref_count -= 1;
if (ubi->ref_count) {
if (!anyway) {
@@ -1109,6 +1112,13 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_err(ubi, "%s reference count %d, destroy anyway",
ubi->ubi_name, ubi->ref_count);
}
+ ubi->is_dead = true;
+ spin_unlock(&ubi_devices_lock);
+
+ ubi_notify_all(ubi, UBI_VOLUME_SHUTDOWN, NULL);
+
+ spin_lock(&ubi_devices_lock);
+ put_device(&ubi->dev);
ubi_devices[ubi_num] = NULL;
spin_unlock(&ubi_devices_lock);
@@ -1219,43 +1229,43 @@ static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
return mtd;
}
-static int __init ubi_init(void)
+static void ubi_notify_add(struct mtd_info *mtd)
{
- int err, i, k;
+ struct device_node *np = mtd_get_of_node(mtd);
+ int err;
- /* Ensure that EC and VID headers have correct size */
- BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
- BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
+ if (!of_device_is_compatible(np, "linux,ubi"))
+ return;
- if (mtd_devs > UBI_MAX_DEVICES) {
- pr_err("UBI error: too many MTD devices, maximum is %d\n",
- UBI_MAX_DEVICES);
- return -EINVAL;
- }
+ /*
+ * We are already holding &mtd_table_mutex, but still need to
+ * bump the MTD device's refcount.
+ */
+ err = __get_mtd_device(mtd);
+ if (err)
+ return;
- /* Create base sysfs directory and sysfs files */
- err = class_register(&ubi_class);
+ /* called while holding mtd_table_mutex */
+ mutex_lock_nested(&ubi_devices_mutex, SINGLE_DEPTH_NESTING);
+ err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0, false, false);
+ mutex_unlock(&ubi_devices_mutex);
if (err < 0)
- return err;
-
- err = misc_register(&ubi_ctrl_cdev);
- if (err) {
- pr_err("UBI error: cannot register device\n");
- goto out;
- }
+ __put_mtd_device(mtd);
+}
- ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
- sizeof(struct ubi_wl_entry),
- 0, 0, NULL);
- if (!ubi_wl_entry_slab) {
- err = -ENOMEM;
- goto out_dev_unreg;
- }
+static void ubi_notify_remove(struct mtd_info *mtd)
+{
+ /* do nothing for now */
+}
- err = ubi_debugfs_init();
- if (err)
- goto out_slab;
+static struct mtd_notifier ubi_mtd_notifier = {
+ .add = ubi_notify_add,
+ .remove = ubi_notify_remove,
+};
+static int __init ubi_init_attach(void)
+{
+ int err, i, k;
/* Attach MTD devices */
for (i = 0; i < mtd_devs; i++) {
@@ -1304,25 +1314,79 @@ static int __init ubi_init(void)
}
}
+ return 0;
+
+out_detach:
+ for (k = 0; k < i; k++)
+ if (ubi_devices[k]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+ return err;
+}
+#ifndef CONFIG_MTD_UBI_MODULE
+late_initcall(ubi_init_attach);
+#endif
+
+static int __init ubi_init(void)
+{
+ int err;
+
+ /* Ensure that EC and VID headers have correct size */
+ BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
+ BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
+
+ if (mtd_devs > UBI_MAX_DEVICES) {
+ pr_err("UBI error: too many MTD devices, maximum is %d\n",
+ UBI_MAX_DEVICES);
+ return -EINVAL;
+ }
+
+ /* Create base sysfs directory and sysfs files */
+ err = class_register(&ubi_class);
+ if (err < 0)
+ return err;
+
+ err = misc_register(&ubi_ctrl_cdev);
+ if (err) {
+ pr_err("UBI error: cannot register device\n");
+ goto out;
+ }
+
+ ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
+ sizeof(struct ubi_wl_entry),
+ 0, 0, NULL);
+ if (!ubi_wl_entry_slab) {
+ err = -ENOMEM;
+ goto out_dev_unreg;
+ }
+
+ err = ubi_debugfs_init();
+ if (err)
+ goto out_slab;
+
err = ubiblock_init();
if (err) {
pr_err("UBI error: block: cannot initialize, error %d\n", err);
/* See comment above re-ubi_is_module(). */
if (ubi_is_module())
- goto out_detach;
+ goto out_slab;
+ }
+
+ register_mtd_user(&ubi_mtd_notifier);
+
+ if (ubi_is_module()) {
+ err = ubi_init_attach();
+ if (err)
+ goto out_mtd_notifier;
}
return 0;
-out_detach:
- for (k = 0; k < i; k++)
- if (ubi_devices[k]) {
- mutex_lock(&ubi_devices_mutex);
- ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
- mutex_unlock(&ubi_devices_mutex);
- }
- ubi_debugfs_exit();
+out_mtd_notifier:
+ unregister_mtd_user(&ubi_mtd_notifier);
out_slab:
kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
@@ -1332,13 +1396,15 @@ out:
pr_err("UBI error: cannot initialize UBI, error %d\n", err);
return err;
}
-late_initcall(ubi_init);
+device_initcall(ubi_init);
+
static void __exit ubi_exit(void)
{
int i;
ubiblock_exit();
+ unregister_mtd_user(&ubi_mtd_notifier);
for (i = 0; i < UBI_MAX_DEVICES; i++)
if (ubi_devices[i]) {
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 8d1f0e058..e5ac3cd0b 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -1456,7 +1456,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
}
ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
+
+ /*
+ * volumes_lock is needed here to prevent the stale old eba_tbl from
+ * being updated while ubi_resize_volume() is copying the table.
+ */
+ spin_lock(&ubi->volumes_lock);
vol->eba_tbl->entries[lnum].pnum = to;
+ spin_unlock(&ubi->volumes_lock);
out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 5db653eac..f1ea86774 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -152,7 +152,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
spin_lock(&ubi->volumes_lock);
vol = ubi->volumes[vol_id];
- if (!vol)
+ if (!vol || vol->is_dead)
goto out_unlock;
err = -EBUSY;
@@ -280,6 +280,41 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
/**
+ * ubi_get_num_by_path - get UBI device and volume number from device path
+ * @pathname: volume character device node path
+ * @ubi_num: pointer to UBI device number to be set
+ * @vol_id: pointer to UBI volume ID to be set
+ *
+ * Returns 0 on success and sets @ubi_num and @vol_id; returns a negative
+ * error code otherwise.
+ */
+int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id)
+{
+ int error;
+ struct path path;
+ struct kstat stat;
+
+ error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+ if (error)
+ return error;
+
+ error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
+ path_put(&path);
+ if (error)
+ return error;
+
+ if (!S_ISCHR(stat.mode))
+ return -EINVAL;
+
+ *ubi_num = ubi_major2num(MAJOR(stat.rdev));
+ *vol_id = MINOR(stat.rdev) - 1;
+
+ if (*vol_id < 0 || *ubi_num < 0)
+ return -ENODEV;
+
+ return 0;
+}
+
+/**
* ubi_open_volume_path - open UBI volume by its character device node path.
* @pathname: volume character device node path
* @mode: open mode
@@ -290,32 +325,17 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
{
int error, ubi_num, vol_id;
- struct path path;
- struct kstat stat;
dbg_gen("open volume %s, mode %d", pathname, mode);
if (!pathname || !*pathname)
return ERR_PTR(-EINVAL);
- error = kern_path(pathname, LOOKUP_FOLLOW, &path);
- if (error)
- return ERR_PTR(error);
-
- error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
- path_put(&path);
+ error = ubi_get_num_by_path(pathname, &ubi_num, &vol_id);
if (error)
return ERR_PTR(error);
- if (!S_ISCHR(stat.mode))
- return ERR_PTR(-EINVAL);
-
- ubi_num = ubi_major2num(MAJOR(stat.rdev));
- vol_id = MINOR(stat.rdev) - 1;
-
- if (vol_id >= 0 && ubi_num >= 0)
- return ubi_open_volume(ubi_num, vol_id, mode);
- return ERR_PTR(-ENODEV);
+ return ubi_open_volume(ubi_num, vol_id, mode);
}
EXPORT_SYMBOL_GPL(ubi_open_volume_path);
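
ubi_open_volume_path() above now delegates the path lookup to the new ubi_get_num_by_path() helper, which block.c uses to resolve a 'block=' device-path parameter without opening the volume. A hedged sketch of a caller inside the UBI driver itself (example_lookup is hypothetical, error handling kept minimal, and it assumes the declaration added to ubi.h below):

/* Sketch only; assumes it lives inside drivers/mtd/ubi/ with "ubi.h". */
static int example_lookup(const char *path)
{
	int ubi_num, vol_id, err;

	err = ubi_get_num_by_path(path, &ubi_num, &vol_id);
	if (err)
		return err;

	pr_info("%s resolves to ubi%d, volume %d\n", path, ubi_num, vol_id);
	return 0;
}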
diff --git a/drivers/mtd/ubi/nvmem.c b/drivers/mtd/ubi/nvmem.c
new file mode 100644
index 000000000..8aeb9c428
--- /dev/null
+++ b/drivers/mtd/ubi/nvmem.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 Daniel Golle <daniel@makrotopia.org>
+ */
+
+/* UBI NVMEM provider */
+#include "ubi.h"
+#include <linux/nvmem-provider.h>
+#include <asm/div64.h>
+
+/* List of all NVMEM devices */
+static LIST_HEAD(nvmem_devices);
+static DEFINE_MUTEX(devices_mutex);
+
+struct ubi_nvmem {
+ struct nvmem_device *nvmem;
+ int ubi_num;
+ int vol_id;
+ int usable_leb_size;
+ struct list_head list;
+};
+
+static int ubi_nvmem_reg_read(void *priv, unsigned int from,
+ void *val, size_t bytes)
+{
+ size_t to_read, bytes_left = bytes;
+ struct ubi_nvmem *unv = priv;
+ struct ubi_volume_desc *desc;
+ uint32_t offs;
+ uint64_t lnum = from;
+ int err = 0;
+
+ desc = ubi_open_volume(unv->ubi_num, unv->vol_id, UBI_READONLY);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ offs = do_div(lnum, unv->usable_leb_size);
+ while (bytes_left) {
+ to_read = unv->usable_leb_size - offs;
+
+ if (to_read > bytes_left)
+ to_read = bytes_left;
+
+ err = ubi_read(desc, lnum, val, offs, to_read);
+ if (err)
+ break;
+
+ lnum += 1;
+ offs = 0;
+ bytes_left -= to_read;
+ val += to_read;
+ }
+ ubi_close_volume(desc);
+
+ if (err)
+ return err;
+
+ return bytes_left == 0 ? 0 : -EIO;
+}
+
+static int ubi_nvmem_add(struct ubi_volume_info *vi)
+{
+ struct device_node *np = dev_of_node(vi->dev);
+ struct nvmem_config config = {};
+ struct ubi_nvmem *unv;
+ int ret;
+
+ if (!np)
+ return 0;
+
+ if (!of_get_child_by_name(np, "nvmem-layout"))
+ return 0;
+
+ if (WARN_ON_ONCE(vi->usable_leb_size <= 0) ||
+ WARN_ON_ONCE(vi->size <= 0))
+ return -EINVAL;
+
+ unv = kzalloc(sizeof(struct ubi_nvmem), GFP_KERNEL);
+ if (!unv)
+ return -ENOMEM;
+
+ config.id = NVMEM_DEVID_NONE;
+ config.dev = vi->dev;
+ config.name = dev_name(vi->dev);
+ config.owner = THIS_MODULE;
+ config.priv = unv;
+ config.reg_read = ubi_nvmem_reg_read;
+ config.size = vi->usable_leb_size * vi->size;
+ config.word_size = 1;
+ config.stride = 1;
+ config.read_only = true;
+ config.root_only = true;
+ config.ignore_wp = true;
+ config.of_node = np;
+
+ unv->ubi_num = vi->ubi_num;
+ unv->vol_id = vi->vol_id;
+ unv->usable_leb_size = vi->usable_leb_size;
+ unv->nvmem = nvmem_register(&config);
+ if (IS_ERR(unv->nvmem)) {
+ ret = dev_err_probe(vi->dev, PTR_ERR(unv->nvmem),
+ "Failed to register NVMEM device\n");
+ kfree(unv);
+ return ret;
+ }
+
+ mutex_lock(&devices_mutex);
+ list_add_tail(&unv->list, &nvmem_devices);
+ mutex_unlock(&devices_mutex);
+
+ return 0;
+}
+
+static void ubi_nvmem_remove(struct ubi_volume_info *vi)
+{
+ struct ubi_nvmem *unv_c, *unv = NULL;
+
+ mutex_lock(&devices_mutex);
+ list_for_each_entry(unv_c, &nvmem_devices, list)
+ if (unv_c->ubi_num == vi->ubi_num && unv_c->vol_id == vi->vol_id) {
+ unv = unv_c;
+ break;
+ }
+
+ if (!unv) {
+ mutex_unlock(&devices_mutex);
+ return;
+ }
+
+ list_del(&unv->list);
+ mutex_unlock(&devices_mutex);
+ nvmem_unregister(unv->nvmem);
+ kfree(unv);
+}
+
+/**
+ * nvmem_notify - UBI notification handler.
+ * @nb: registered notifier block
+ * @l: notification type
+ * @ns_ptr: pointer to the &struct ubi_notification object
+ */
+static int nvmem_notify(struct notifier_block *nb, unsigned long l,
+ void *ns_ptr)
+{
+ struct ubi_notification *nt = ns_ptr;
+
+ switch (l) {
+ case UBI_VOLUME_RESIZED:
+ ubi_nvmem_remove(&nt->vi);
+ fallthrough;
+ case UBI_VOLUME_ADDED:
+ ubi_nvmem_add(&nt->vi);
+ break;
+ case UBI_VOLUME_SHUTDOWN:
+ ubi_nvmem_remove(&nt->vi);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block nvmem_notifier = {
+ .notifier_call = nvmem_notify,
+};
+
+static int __init ubi_nvmem_init(void)
+{
+ return ubi_register_volume_notifier(&nvmem_notifier, 0);
+}
+
+static void __exit ubi_nvmem_exit(void)
+{
+ struct ubi_nvmem *unv, *tmp;
+
+ mutex_lock(&devices_mutex);
+ list_for_each_entry_safe(unv, tmp, &nvmem_devices, list) {
+ nvmem_unregister(unv->nvmem);
+ list_del(&unv->list);
+ kfree(unv);
+ }
+ mutex_unlock(&devices_mutex);
+
+ ubi_unregister_volume_notifier(&nvmem_notifier);
+}
+
+module_init(ubi_nvmem_init);
+module_exit(ubi_nvmem_exit);
+MODULE_DESCRIPTION("NVMEM layer over UBI volumes");
+MODULE_AUTHOR("Daniel Golle");
+MODULE_LICENSE("GPL");
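
In ubi_nvmem_reg_read() above, do_div() splits the linear NVMEM offset into a starting LEB number plus an in-LEB offset, and the read loop then continues from offset 0 of each following LEB until all requested bytes are copied. A tiny userspace model of just that offset arithmetic, with a made-up usable LEB size:

#include <stdint.h>
#include <stdio.h>

/* Userspace model of the LEB/offset split done with do_div() above. */
static void split(uint64_t from, uint32_t leb_size,
		  uint64_t *lnum, uint32_t *offs)
{
	*lnum = from / leb_size;
	*offs = (uint32_t)(from % leb_size);
}

int main(void)
{
	const uint32_t leb_size = 126976;	/* hypothetical usable LEB size */
	uint64_t lnum;
	uint32_t offs;

	split(300000, leb_size, &lnum, &offs);
	printf("byte 300000 -> LEB %llu, offset %u\n",
	       (unsigned long long)lnum, offs);
	return 0;
}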
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 0b42bb45d..32009a248 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -337,6 +337,7 @@ struct ubi_volume {
int writers;
int exclusive;
int metaonly;
+ bool is_dead;
int reserved_pebs;
int vol_type;
@@ -561,6 +562,7 @@ struct ubi_device {
spinlock_t volumes_lock;
int ref_count;
int image_seq;
+ bool is_dead;
int rsvd_pebs;
int avail_pebs;
@@ -955,6 +957,7 @@ void ubi_free_internal_volumes(struct ubi_device *ubi);
void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
struct ubi_volume_info *vi);
+int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id);
/* scan.c */
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr);
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 2c867d16f..5a3558bbb 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -59,7 +59,7 @@ static ssize_t vol_attribute_show(struct device *dev,
struct ubi_device *ubi = vol->ubi;
spin_lock(&ubi->volumes_lock);
- if (!ubi->volumes[vol->vol_id]) {
+ if (!ubi->volumes[vol->vol_id] || ubi->volumes[vol->vol_id]->is_dead) {
spin_unlock(&ubi->volumes_lock);
return -ENODEV;
}
@@ -124,6 +124,31 @@ static void vol_release(struct device *dev)
kfree(vol);
}
+static struct fwnode_handle *find_volume_fwnode(struct ubi_volume *vol)
+{
+ struct fwnode_handle *fw_vols, *fw_vol;
+ const char *volname;
+ u32 volid;
+
+ fw_vols = device_get_named_child_node(vol->dev.parent->parent, "volumes");
+ if (!fw_vols)
+ return NULL;
+
+ fwnode_for_each_child_node(fw_vols, fw_vol) {
+ if (!fwnode_property_read_string(fw_vol, "volname", &volname) &&
+ strncmp(volname, vol->name, vol->name_len))
+ continue;
+
+ if (!fwnode_property_read_u32(fw_vol, "volid", &volid) &&
+ vol->vol_id != volid)
+ continue;
+
+ return fw_vol;
+ }
+
+ return NULL;
+}
+
/**
* ubi_create_volume - create volume.
* @ubi: UBI device description object
@@ -189,7 +214,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Ensure that the name is unique */
for (i = 0; i < ubi->vtbl_slots; i++)
- if (ubi->volumes[i] &&
+ if (ubi->volumes[i] && !ubi->volumes[i]->is_dead &&
ubi->volumes[i]->name_len == req->name_len &&
!strcmp(ubi->volumes[i]->name, req->name)) {
ubi_err(ubi, "volume \"%s\" exists (ID %d)",
@@ -223,6 +248,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->name_len = req->name_len;
memcpy(vol->name, req->name, vol->name_len);
vol->ubi = ubi;
+ device_set_node(&vol->dev, find_volume_fwnode(vol));
/*
* Finish all pending erases because there may be some LEBs belonging
@@ -352,6 +378,19 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
err = -EBUSY;
goto out_unlock;
}
+
+ /*
+ * Mark the volume as dead at this point so that nobody can take a new
+ * reference to it from now on. This is necessary because we have to
+ * release the spinlock before calling ubi_volume_notify().
+ */
+ vol->is_dead = true;
+ spin_unlock(&ubi->volumes_lock);
+
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_SHUTDOWN);
+
+ spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = NULL;
spin_unlock(&ubi->volumes_lock);
@@ -408,6 +447,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
struct ubi_device *ubi = vol->ubi;
struct ubi_vtbl_record vtbl_rec;
struct ubi_eba_table *new_eba_tbl = NULL;
+ struct ubi_eba_table *old_eba_tbl = NULL;
int vol_id = vol->vol_id;
if (ubi->ro_mode)
@@ -453,10 +493,13 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
err = -ENOSPC;
goto out_free;
}
+
ubi->avail_pebs -= pebs;
ubi->rsvd_pebs += pebs;
ubi_eba_copy_table(vol, new_eba_tbl, vol->reserved_pebs);
- ubi_eba_replace_table(vol, new_eba_tbl);
+ old_eba_tbl = vol->eba_tbl;
+ vol->eba_tbl = new_eba_tbl;
+ vol->reserved_pebs = reserved_pebs;
spin_unlock(&ubi->volumes_lock);
}
@@ -471,7 +514,9 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
ubi->avail_pebs -= pebs;
ubi_update_reserved(ubi);
ubi_eba_copy_table(vol, new_eba_tbl, reserved_pebs);
- ubi_eba_replace_table(vol, new_eba_tbl);
+ old_eba_tbl = vol->eba_tbl;
+ vol->eba_tbl = new_eba_tbl;
+ vol->reserved_pebs = reserved_pebs;
spin_unlock(&ubi->volumes_lock);
}
@@ -493,7 +538,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (err)
goto out_acc;
- vol->reserved_pebs = reserved_pebs;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = reserved_pebs;
vol->last_eb_bytes = vol->usable_leb_size;
@@ -501,19 +545,23 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
(long long)vol->used_ebs * vol->usable_leb_size;
}
+ /* destroy old table */
+ ubi_eba_destroy_table(old_eba_tbl);
ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
self_check_volumes(ubi);
return err;
out_acc:
- if (pebs > 0) {
- spin_lock(&ubi->volumes_lock);
- ubi->rsvd_pebs -= pebs;
- ubi->avail_pebs += pebs;
- spin_unlock(&ubi->volumes_lock);
- }
- return err;
-
+ spin_lock(&ubi->volumes_lock);
+ vol->reserved_pebs = reserved_pebs - pebs;
+ ubi->rsvd_pebs -= pebs;
+ ubi->avail_pebs += pebs;
+ if (pebs > 0)
+ ubi_eba_copy_table(vol, old_eba_tbl, vol->reserved_pebs);
+ else
+ ubi_eba_copy_table(vol, old_eba_tbl, reserved_pebs);
+ vol->eba_tbl = old_eba_tbl;
+ spin_unlock(&ubi->volumes_lock);
out_free:
ubi_eba_destroy_table(new_eba_tbl);
return err;
@@ -592,6 +640,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
vol->dev.class = &ubi_class;
vol->dev.groups = volume_dev_groups;
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
+ device_set_node(&vol->dev, find_volume_fwnode(vol));
err = device_register(&vol->dev);
if (err) {
cdev_del(&vol->cdev);