Diffstat (limited to 'drivers/block/zram/zram_drv.c')
-rw-r--r-- | drivers/block/zram/zram_drv.c | 60 |
1 files changed, 35 insertions, 25 deletions
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index f0639df6cd..3acd7006ad 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -426,11 +426,10 @@ static void reset_bdev(struct zram *zram)
 	if (!zram->backing_dev)
 		return;
 
-	fput(zram->bdev_file);
 	/* hope filp_close flush all of IO */
 	filp_close(zram->backing_dev, NULL);
 	zram->backing_dev = NULL;
-	zram->bdev_file = NULL;
+	zram->bdev = NULL;
 	zram->disk->fops = &zram_devops;
 	kvfree(zram->bitmap);
 	zram->bitmap = NULL;
@@ -473,10 +472,8 @@ static ssize_t backing_dev_store(struct device *dev,
 	size_t sz;
 	struct file *backing_dev = NULL;
 	struct inode *inode;
-	struct address_space *mapping;
 	unsigned int bitmap_sz;
 	unsigned long nr_pages, *bitmap = NULL;
-	struct file *bdev_file = NULL;
 	int err;
 	struct zram *zram = dev_to_zram(dev);
 
@@ -497,15 +494,14 @@ static ssize_t backing_dev_store(struct device *dev,
 	if (sz > 0 && file_name[sz - 1] == '\n')
 		file_name[sz - 1] = 0x00;
 
-	backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
+	backing_dev = filp_open(file_name, O_RDWR | O_LARGEFILE | O_EXCL, 0);
 	if (IS_ERR(backing_dev)) {
 		err = PTR_ERR(backing_dev);
 		backing_dev = NULL;
 		goto out;
 	}
 
-	mapping = backing_dev->f_mapping;
-	inode = mapping->host;
+	inode = backing_dev->f_mapping->host;
 
 	/* Support only block device in this moment */
 	if (!S_ISBLK(inode->i_mode)) {
@@ -513,14 +509,6 @@ static ssize_t backing_dev_store(struct device *dev,
 		err = -ENOTBLK;
 		goto out;
 	}
-	bdev_file = bdev_file_open_by_dev(inode->i_rdev,
-				BLK_OPEN_READ | BLK_OPEN_WRITE, zram, NULL);
-	if (IS_ERR(bdev_file)) {
-		err = PTR_ERR(bdev_file);
-		bdev_file = NULL;
-		goto out;
-	}
-
 	nr_pages = i_size_read(inode) >> PAGE_SHIFT;
 	bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
 	bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
@@ -531,7 +519,7 @@ static ssize_t backing_dev_store(struct device *dev,
 
 	reset_bdev(zram);
 
-	zram->bdev_file = bdev_file;
+	zram->bdev = I_BDEV(inode);
 	zram->backing_dev = backing_dev;
 	zram->bitmap = bitmap;
 	zram->nr_pages = nr_pages;
@@ -544,9 +532,6 @@ static ssize_t backing_dev_store(struct device *dev,
 out:
 	kvfree(bitmap);
 
-	if (bdev_file)
-		fput(bdev_file);
-
 	if (backing_dev)
 		filp_close(backing_dev, NULL);
 
@@ -587,7 +572,7 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
 {
 	struct bio *bio;
 
-	bio = bio_alloc(file_bdev(zram->bdev_file), 1, parent->bi_opf, GFP_NOIO);
+	bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO);
 	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
 	__bio_add_page(bio, page, PAGE_SIZE, 0);
 	bio_chain(bio, parent);
@@ -703,7 +688,7 @@ static ssize_t writeback_store(struct device *dev,
 			continue;
 		}
 
-		bio_init(&bio, file_bdev(zram->bdev_file), &bio_vec, 1,
+		bio_init(&bio, zram->bdev, &bio_vec, 1,
 			 REQ_OP_WRITE | REQ_SYNC);
 		bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
 		__bio_add_page(&bio, page, PAGE_SIZE, 0);
@@ -785,7 +770,7 @@ static void zram_sync_read(struct work_struct *work)
 	struct bio_vec bv;
 	struct bio bio;
 
-	bio_init(&bio, file_bdev(zw->zram->bdev_file), &bv, 1, REQ_OP_READ);
+	bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ);
 	bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
 	__bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
 	zw->error = submit_bio_wait(&bio);
@@ -1568,7 +1553,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
  * Corresponding ZRAM slot should be locked.
  */
 static int zram_recompress(struct zram *zram, u32 index, struct page *page,
-			   u32 threshold, u32 prio, u32 prio_max)
+			   u64 *num_recomp_pages, u32 threshold, u32 prio,
+			   u32 prio_max)
 {
 	struct zcomp_strm *zstrm = NULL;
 	unsigned long handle_old;
@@ -1645,6 +1631,15 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
 	if (!zstrm)
 		return 0;
 
+	/*
+	 * Decrement the limit (if set) on pages we can recompress, even
+	 * when current recompression was unsuccessful or did not compress
+	 * the page below the threshold, because we still spent resources
+	 * on it.
+	 */
+	if (*num_recomp_pages)
+		*num_recomp_pages -= 1;
+
 	if (class_index_new >= class_index_old) {
 		/*
 		 * Secondary algorithms failed to re-compress the page
@@ -1710,6 +1705,7 @@ static ssize_t recompress_store(struct device *dev,
 	struct zram *zram = dev_to_zram(dev);
 	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
 	char *args, *param, *val, *algo = NULL;
+	u64 num_recomp_pages = ULLONG_MAX;
 	u32 mode = 0, threshold = 0;
 	unsigned long index;
 	struct page *page;
@@ -1732,6 +1728,17 @@ static ssize_t recompress_store(struct device *dev,
 			continue;
 		}
 
+		if (!strcmp(param, "max_pages")) {
+			/*
+			 * Limit the number of entries (pages) we attempt to
+			 * recompress.
+			 */
+			ret = kstrtoull(val, 10, &num_recomp_pages);
+			if (ret)
+				return ret;
+			continue;
+		}
+
 		if (!strcmp(param, "threshold")) {
 			/*
 			 * We will re-compress only idle objects equal or
@@ -1788,6 +1795,9 @@ static ssize_t recompress_store(struct device *dev,
 	for (index = 0; index < nr_pages; index++) {
 		int err = 0;
 
+		if (!num_recomp_pages)
+			break;
+
 		zram_slot_lock(zram, index);
 
 		if (!zram_allocated(zram, index))
@@ -1807,8 +1817,8 @@ static ssize_t recompress_store(struct device *dev,
 		    zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
 			goto next;
 
-		err = zram_recompress(zram, index, page, threshold,
-				      prio, prio_max);
+		err = zram_recompress(zram, index, page, &num_recomp_pages,
+				      threshold, prio, prio_max);
 next:
 		zram_slot_unlock(zram, index);
 		if (err) {
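
For context, a minimal userspace sketch (not part of the diff above) of how the new "max_pages" recompression limit could be driven. The zram device path and the budget of 500 pages are assumptions for illustration; the same write can equally be done from a shell by echoing the parameter string into the recompress attribute.

/*
 * Hypothetical example: ask zram0 to recompress at most 500 idle pages
 * using the max_pages knob added by this change.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Parameter string parsed by recompress_store(); 500 is an assumed budget. */
	static const char cmd[] = "type=idle max_pages=500";
	int fd = open("/sys/block/zram0/recompress", O_WRONLY);

	if (fd < 0) {
		perror("open /sys/block/zram0/recompress");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}

As the added comment in zram_recompress() notes, the budget is charged for every slot a recompression attempt is made on, whether or not the page ends up smaller, so max_pages bounds the work spent rather than the number of pages actually shrunk.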