author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-08 04:15:09 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-08 04:15:09 +0000
commit     d78786f17f03109c3e9c7043b1b63f947afb0ee0 (patch)
tree       58709afdbb58c9a8b795c712d281a6b5710d36cc /drivers/mtd
parent     Adding debian version 4.19.269-1. (diff)
download   linux-d78786f17f03109c3e9c7043b1b63f947afb0ee0.tar.xz
           linux-d78786f17f03109c3e9c7043b1b63f947afb0ee0.zip
Merging upstream version 4.19.282.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/lpddr/lpddr2_nvm.c      2
-rw-r--r--  drivers/mtd/maps/pxa2xx-flash.c     2
-rw-r--r--  drivers/mtd/mtdblock.c             12
-rw-r--r--  drivers/mtd/mtdcore.c               4
-rw-r--r--  drivers/mtd/nand/raw/sunxi_nand.c   2
-rw-r--r--  drivers/mtd/ubi/build.c            16
-rw-r--r--  drivers/mtd/ubi/vmt.c              18
-rw-r--r--  drivers/mtd/ubi/wl.c               30
8 files changed, 66 insertions, 20 deletions
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
index 90e6cb64d..b004d3213 100644
--- a/drivers/mtd/lpddr/lpddr2_nvm.c
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -442,6 +442,8 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
/* lpddr2_nvm address range */
add_range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!add_range)
+ return -ENODEV;
/* Populate map_info data structure */
*map = (struct map_info) {
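
The lpddr2_nvm hunk above guards platform_get_resource(): it returns NULL when no IORESOURCE_MEM entry was supplied for the device, and the probe previously dereferenced the result unconditionally. A minimal sketch of the guarded pattern (illustrative probe skeleton, not the driver's actual code):

#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;

	/* NULL means the board file / device tree provided no MEM resource */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* res->start and resource_size(res) are safe to use from here on */
	return 0;
}
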
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 2cde28ed9..59d2fe1f4 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -69,6 +69,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
if (!info->map.virt) {
printk(KERN_WARNING "Failed to ioremap %s\n",
info->map.name);
+ kfree(info);
return -ENOMEM;
}
info->map.cached =
@@ -91,6 +92,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
iounmap((void *)info->map.virt);
if (info->map.cached)
iounmap(info->map.cached);
+ kfree(info);
return -EIO;
}
info->mtd->dev.parent = &pdev->dev;
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index a5b1933c0..17b3495fe 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -164,7 +164,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
mtdblk->cache_state = STATE_EMPTY;
ret = mtd_read(mtd, sect_start, sect_size,
&retlen, mtdblk->cache_data);
- if (ret)
+ if (ret && !mtd_is_bitflip(ret))
return ret;
if (retlen != sect_size)
return -EIO;
@@ -199,8 +199,12 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
mtd->name, pos, len);
- if (!sect_size)
- return mtd_read(mtd, pos, len, &retlen, buf);
+ if (!sect_size) {
+ ret = mtd_read(mtd, pos, len, &retlen, buf);
+ if (ret && !mtd_is_bitflip(ret))
+ return ret;
+ return 0;
+ }
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -220,7 +224,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
memcpy (buf, mtdblk->cache_data + offset, size);
} else {
ret = mtd_read(mtd, pos, size, &retlen, buf);
- if (ret)
+ if (ret && !mtd_is_bitflip(ret))
return ret;
if (retlen != size)
return -EIO;
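
The mtdblock.c hunks above stop treating correctable bit-flips as hard read failures: mtd_read() returns -EUCLEAN when ECC fixed up bit-flips, meaning the returned data is valid although the block may deserve scrubbing, and mtdblock used to bubble that up to the block layer as an I/O error. The helper used in the fix is roughly the following (paraphrased from include/linux/mtd/mtd.h, quoted from memory):

#include <linux/errno.h>

/* true when mtd_read() corrected bit-flips: buffer contents are good,
 * the non-zero return is only advisory */
static inline int mtd_is_bitflip(int err)
{
	return err == -EUCLEAN;
}

/* companion check for uncorrectable ECC errors, which must still fail */
static inline int mtd_is_eccerr(int err)
{
	return err == -EBADMSG;
}
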
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index a0b1a7814..90d9c0e93 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -567,8 +567,10 @@ int add_mtd_device(struct mtd_info *mtd)
dev_set_drvdata(&mtd->dev, mtd);
of_node_get(mtd_get_of_node(mtd));
error = device_register(&mtd->dev);
- if (error)
+ if (error) {
+ put_device(&mtd->dev);
goto fail_added;
+ }
if (!IS_ERR_OR_NULL(dfs_dir_mtd)) {
mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(&mtd->dev), dfs_dir_mtd);
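
The mtdcore.c hunk follows the driver-core rule that once device_register() has been called, the struct device is reference counted and must be released with put_device() even when registration fails; a direct kfree() would bypass the kobject lifetime and the release callback. A generic sketch of that pattern (not the mtdcore code; names are illustrative):

#include <linux/device.h>
#include <linux/slab.h>

struct foo {
	struct device dev;
};

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo, dev));
}

static int foo_add(struct foo *f)
{
	int err;

	f->dev.release = foo_release;
	device_initialize(&f->dev);
	dev_set_name(&f->dev, "foo0");

	err = device_add(&f->dev);
	if (err)
		/* never kfree() here: drop the reference so foo_release()
		 * runs once the last holder lets go */
		put_device(&f->dev);
	return err;
}
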
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 88075e420..fe7bfcdf7 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -1670,7 +1670,7 @@ static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section,
if (section < ecc->steps)
oobregion->length = 4;
else
- oobregion->offset = mtd->oobsize - oobregion->offset;
+ oobregion->length = mtd->oobsize - oobregion->offset;
return 0;
}
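
The sunxi_nand.c fix is a one-word bug: for the last OOB section, the free-area callback wrote the remaining byte count into oobregion->offset instead of oobregion->length, so the free region reported for the final section was wrong. For reference, the structures involved look roughly like this (paraphrased from include/linux/mtd/mtd.h):

struct mtd_oob_region {
	u32 offset;	/* start of the region within the OOB area */
	u32 length;	/* number of bytes in the region */
};

struct mtd_ooblayout_ops {
	int (*ecc)(struct mtd_info *mtd, int section,
		   struct mtd_oob_region *oobregion);
	int (*free)(struct mtd_info *mtd, int section,
		    struct mtd_oob_region *oobregion);
};
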
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 1ea3a4977..3eb14c68c 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -480,6 +480,7 @@ static int uif_init(struct ubi_device *ubi)
err = ubi_add_volume(ubi, ubi->volumes[i]);
if (err) {
ubi_err(ubi, "cannot add volume %d", i);
+ ubi->volumes[i] = NULL;
goto out_volumes;
}
}
@@ -673,6 +674,21 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
ubi->vid_hdr_aloffset;
}
+ /*
+ * Memory allocation for VID header is ubi->vid_hdr_alsize
+ * which is described in comments in io.c.
+ * Make sure VID header shift + UBI_VID_HDR_SIZE not exceeds
+ * ubi->vid_hdr_alsize, so that all vid header operations
+ * won't access memory out of bounds.
+ */
+ if ((ubi->vid_hdr_shift + UBI_VID_HDR_SIZE) > ubi->vid_hdr_alsize) {
+ ubi_err(ubi, "Invalid VID header offset %d, VID header shift(%d)"
+ " + VID header size(%zu) > VID header aligned size(%d).",
+ ubi->vid_hdr_offset, ubi->vid_hdr_shift,
+ UBI_VID_HDR_SIZE, ubi->vid_hdr_alsize);
+ return -EINVAL;
+ }
+
/* Similar for the data offset */
ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
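
The new bounds check in io_init() refuses attach-time vid_hdr_offset values that would let VID-header I/O run past the vid_hdr_alsize buffer allocated for it. A worked example with illustrative numbers: UBI_VID_HDR_SIZE is 64 bytes, so with hdrs_min_io_size = 512 the aligned buffer is

	vid_hdr_alsize   = ALIGN(64, 512) = 512

and a user-supplied vid_hdr_offset of 480 (rounded down to the I/O unit to form vid_hdr_aloffset, as io_init() does above) gives

	vid_hdr_aloffset = 0
	vid_hdr_shift    = 480 - 0 = 480
	480 + 64 = 544 > 512

so the attach now fails with -EINVAL instead of letting later VID-header reads and writes run past the end of that buffer.
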
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 9f6ffd340..c5dec5884 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -477,7 +477,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
for (i = 0; i < -pebs; i++) {
err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
if (err)
- goto out_acc;
+ goto out_free;
}
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs += pebs;
@@ -525,8 +525,10 @@ out_acc:
ubi->avail_pebs += pebs;
spin_unlock(&ubi->volumes_lock);
}
+ return err;
+
out_free:
- kfree(new_eba_tbl);
+ ubi_eba_destroy_table(new_eba_tbl);
return err;
}
@@ -593,6 +595,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
if (err) {
ubi_err(ubi, "cannot add character device for volume %d, error %d",
vol_id, err);
+ vol_release(&vol->dev);
return err;
}
@@ -603,15 +606,14 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
vol->dev.groups = volume_dev_groups;
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
- if (err)
- goto out_cdev;
+ if (err) {
+ cdev_del(&vol->cdev);
+ put_device(&vol->dev);
+ return err;
+ }
self_check_volumes(ubi);
return err;
-
-out_cdev:
- cdev_del(&vol->cdev);
- return err;
}
/**
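
Two things are going on in the ubi/vmt.c hunks. In ubi_resize_volume(), the shrink path now unwinds through out_free, and out_free calls ubi_eba_destroy_table() rather than kfree(): the EBA table is a two-level allocation (an outer struct plus a separately allocated entries array), so a bare kfree() of the outer struct leaked the entries. Roughly (paraphrased from drivers/mtd/ubi/eba.c; field names from memory):

struct ubi_eba_entry {
	int pnum;	/* PEB backing this LEB, negative when unmapped */
};

struct ubi_eba_table {
	struct ubi_eba_entry *entries;	/* one slot per LEB */
};

In ubi_add_volume(), the error paths now respect the embedded struct device: a failed cdev_add() releases the volume through vol_release(), and a failed device_register() deletes the cdev and then drops the reference with put_device() instead of leaving the half-registered device behind.
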
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index ac336164f..83c460f7a 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -568,6 +568,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
* @vol_id: the volume ID that last used this PEB
* @lnum: the last used logical eraseblock number for the PEB
* @torture: if the physical eraseblock has to be tortured
+ * @nested: denotes whether the work_sem is already held
*
* This function returns zero in case of success and a %-ENOMEM in case of
* failure.
@@ -865,8 +866,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
if (err) {
- if (e2)
+ if (e2) {
+ spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e2);
+ spin_unlock(&ubi->wl_lock);
+ }
goto out_ro;
}
@@ -948,11 +952,11 @@ out_error:
spin_lock(&ubi->wl_lock);
ubi->move_from = ubi->move_to = NULL;
ubi->move_to_put = ubi->wl_scheduled = 0;
+ wl_entry_destroy(ubi, e1);
+ wl_entry_destroy(ubi, e2);
spin_unlock(&ubi->wl_lock);
ubi_free_vid_buf(vidb);
- wl_entry_destroy(ubi, e1);
- wl_entry_destroy(ubi, e2);
out_ro:
ubi_ro_mode(ubi);
@@ -1043,8 +1047,6 @@ out_unlock:
* __erase_worker - physical eraseblock erase worker function.
* @ubi: UBI device description object
* @wl_wrk: the work object
- * @shutdown: non-zero if the worker has to free memory and exit
- * because the WL sub-system is shutting down
*
* This function erases a physical eraseblock and perform torture testing if
* needed. It also takes care about marking the physical eraseblock bad if
@@ -1094,16 +1096,20 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
int err1;
/* Re-schedule the LEB for erasure */
- err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
+ err1 = schedule_erase(ubi, e, vol_id, lnum, 0, true);
if (err1) {
+ spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e);
+ spin_unlock(&ubi->wl_lock);
err = err1;
goto out_ro;
}
return err;
}
+ spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e);
+ spin_unlock(&ubi->wl_lock);
if (err != -EIO)
/*
* If this is not %-EIO, we have no idea what to do. Scheduling
@@ -1219,6 +1225,18 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
retry:
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
+ if (!e) {
+ /*
+ * This wl entry has been removed for some errors by other
+ * process (eg. wear leveling worker), corresponding process
+ * (except __erase_worker, which cannot concurrent with
+ * ubi_wl_put_peb) will set ubi ro_mode at the same time,
+ * just ignore this wl entry.
+ */
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_protect);
+ return 0;
+ }
if (e == ubi->move_from) {
/*
* User is putting the physical eraseblock which was selected to