author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
commit     01a69402cf9d38ff180345d55c2ee51c7e89fbc7
tree       b406c5242a088c4f59c6e4b719b783f43aca6ae9  /drivers/block
parent     Adding upstream version 6.7.12.
download   linux-01a69402cf9d38ff180345d55c2ee51c7e89fbc7.tar.xz
           linux-01a69402cf9d38ff180345d55c2ee51c7e89fbc7.zip

Adding upstream version 6.8.9. (upstream/6.8.9)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/block')

 -rw-r--r--  drivers/block/aoe/aoeblk.c          |  3
 -rw-r--r--  drivers/block/drbd/drbd_actlog.c    | 16
 -rw-r--r--  drivers/block/floppy.c              |  2
 -rw-r--r--  drivers/block/loop.c                |  7
 -rw-r--r--  drivers/block/nbd.c                 |  6
 -rw-r--r--  drivers/block/null_blk/main.c       |  5
 -rw-r--r--  drivers/block/null_blk/zoned.c      |  2
 -rw-r--r--  drivers/block/rbd.c                 | 10
 -rw-r--r--  drivers/block/rnbd/rnbd-clt.c       | 13
 -rw-r--r--  drivers/block/rnbd/rnbd-proto.h     | 14
 -rw-r--r--  drivers/block/rnbd/rnbd-srv.c       | 25
 -rw-r--r--  drivers/block/ublk_drv.c            | 13
 -rw-r--r--  drivers/block/virtio_blk.c          | 80
 -rw-r--r--  drivers/block/xen-blkback/common.h  |  2
 -rw-r--r--  drivers/block/zram/Kconfig          | 15
 -rw-r--r--  drivers/block/zram/zram_drv.c       | 58
 -rw-r--r--  drivers/block/zram/zram_drv.h       |  2

 17 files changed, 124 insertions(+), 149 deletions(-)
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 37eff1c974..b1b47d88f5 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -384,7 +384,8 @@ aoeblk_gdalloc(void *vp)
 	WARN_ON(d->flags & DEVFL_TKILL);
 	WARN_ON(d->gd);
 	WARN_ON(d->flags & DEVFL_UP);
-	blk_queue_max_hw_sectors(gd->queue, BLK_DEF_MAX_SECTORS);
+	/* random number picked from the history block max_sectors cap */
+	blk_queue_max_hw_sectors(gd->queue, 2560u);
 	blk_queue_io_opt(gd->queue, SZ_2M);
 	d->bufpool = mp;
 	d->blkq = gd->queue;
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 64b3a1c76f..742b2908ff 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -838,8 +838,8 @@ static bool plausible_request_size(int size)
 }
 
 /* clear the bit corresponding to the piece of storage in question:
- * size byte of data starting from sector.  Only clear a bits of the affected
- * one ore more _aligned_ BM_BLOCK_SIZE blocks.
+ * size byte of data starting from sector.  Only clear bits of the affected
+ * one or more _aligned_ BM_BLOCK_SIZE blocks.
  *
  * called by worker on C_SYNC_TARGET and receiver on SyncSource.
  *
@@ -957,7 +957,9 @@ static int _is_in_al(struct drbd_device *device, unsigned int enr)
  * @device:	DRBD device.
  * @sector:	The sector number.
  *
- * This functions sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
+ * This functions sleeps on al_wait.
+ *
+ * Returns: %0 on success, -EINTR if interrupted.
  */
 int drbd_rs_begin_io(struct drbd_device *device, sector_t sector)
 {
@@ -1004,11 +1006,13 @@ retry:
 
 /**
  * drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
- * @device:	DRBD device.
+ * @peer_device: DRBD device.
  * @sector:	The sector number.
 *
 * Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
- * tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
+ * tries to set it to BME_LOCKED.
+ *
+ * Returns: %0 upon success, and -EAGAIN
 * if there is still application IO going on in this area.
 */
 int drbd_try_rs_begin_io(struct drbd_peer_device *peer_device, sector_t sector)
@@ -1190,7 +1194,7 @@ void drbd_rs_cancel_all(struct drbd_device *device)
 * drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
 * @device:	DRBD device.
 *
- * Returns 0 upon success, -EAGAIN if at least one reference count was
+ * Returns: %0 upon success, -EAGAIN if at least one reference count was
 * not zero.
 */
 int drbd_rs_del_all(struct drbd_device *device)
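A note on the 2560u constant in the aoeblk.c hunk above (loop.c below makes the same substitution): 2560 sectors of 512 bytes is 1280 KiB, the value the removed BLK_DEF_MAX_SECTORS constant used to carry, so the effective cap is unchanged. A compile-time restatement of that arithmetic, with an illustrative macro name that is not part of the patch:

    #include <linux/build_bug.h>

    /* 2560 sectors * 512 bytes/sector = 1280 KiB, the historical default cap */
    #define EXAMPLE_DEF_MAX_SECTORS 2560u
    static_assert(EXAMPLE_DEF_MAX_SECTORS * 512 == 1280u * 1024);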
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 11114a5d9e..d0e41d52d6 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3079,7 +3079,7 @@ static void raw_cmd_free(struct floppy_raw_cmd **ptr)
 	}
 }
 
-#define MAX_LEN (1UL << MAX_ORDER << PAGE_SHIFT)
+#define MAX_LEN (1UL << MAX_PAGE_ORDER << PAGE_SHIFT)
 
 static int raw_cmd_copyin(int cmd, void __user *param,
 				struct floppy_raw_cmd **rcmd)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 552f56a84a..f8145499da 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -243,9 +243,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
 
 	iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len);
 
-	file_start_write(file);
 	bw = vfs_iter_write(file, &i, ppos, 0);
-	file_end_write(file);
 
 	if (likely(bw == bvec->bv_len))
 		return 0;
@@ -1301,8 +1299,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 		loop_set_size(lo, new_size);
 	}
 
-	loop_config_discard(lo);
-
 	/* update dio if lo_offset or transfer is changed */
 	__loop_update_dio(lo, lo->use_dio);
 
@@ -2036,7 +2032,8 @@ static int loop_add(int i)
 	}
 	lo->lo_queue = lo->lo_disk->queue;
 
-	blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);
+	/* random number picked from the history block max_sectors cap */
+	blk_queue_max_hw_sectors(lo->lo_queue, 2560u);
 
 	/*
 	 * By default, we do buffer IO, so it doesn't make sense to enable
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index df738eab02..b7c332528e 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -334,10 +334,8 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
 	if (!nbd->pid)
 		return 0;
 
-	if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
-		nbd->disk->queue->limits.discard_granularity = blksize;
+	if (nbd->config->flags & NBD_FLAG_SEND_TRIM)
 		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
-	}
 	blk_queue_logical_block_size(nbd->disk->queue, blksize);
 	blk_queue_physical_block_size(nbd->disk->queue, blksize);
 
@@ -1353,7 +1351,6 @@ static void nbd_config_put(struct nbd_device *nbd)
 		nbd->config = NULL;
 
 		nbd->tag_set.timeout = 0;
-		nbd->disk->queue->limits.discard_granularity = 0;
 		blk_queue_max_discard_sectors(nbd->disk->queue, 0);
 
 		mutex_unlock(&nbd->config_lock);
@@ -1846,7 +1843,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
 	 * Tell the block layer that we are not a rotational device
 	 */
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
-	disk->queue->limits.discard_granularity = 0;
 	blk_queue_max_discard_sectors(disk->queue, 0);
 	blk_queue_max_segment_size(disk->queue, UINT_MAX);
 	blk_queue_max_segments(disk->queue, USHRT_MAX);
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 13ed446b5e..36755f263e 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1840,7 +1840,7 @@ static void null_del_dev(struct nullb *nullb)
 
 	dev = nullb->dev;
 
-	ida_simple_remove(&nullb_indexes, nullb->index);
+	ida_free(&nullb_indexes, nullb->index);
 
 	list_del_init(&nullb->list);
 
@@ -1880,7 +1880,6 @@ static void null_config_discard(struct nullb *nullb)
 		return;
 	}
 
-	nullb->q->limits.discard_granularity = nullb->dev->blocksize;
 	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
 }
 
@@ -2175,7 +2174,7 @@ static int null_add_dev(struct nullb_device *dev)
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
 
 	mutex_lock(&lock);
-	rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
+	rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
 	if (rv < 0) {
 		mutex_unlock(&lock);
 		goto out_cleanup_zone;
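On the floppy.c hunk a few files up: MAX_PAGE_ORDER is the 6.8 rename of MAX_ORDER, the largest buddy-allocator order, so MAX_LEN is still the byte size of the largest physically contiguous allocation the raw-command ioctl path will accept. With common defaults the bound works out as below; a MAX_PAGE_ORDER of 10 and a 4 KiB page are configuration-dependent assumptions, not guarantees:

    #include <linux/build_bug.h>

    /*
     * MAX_LEN = 1UL << MAX_PAGE_ORDER << PAGE_SHIFT
     *         = 1 << 10 << 12 = 4 MiB,
     * assuming MAX_PAGE_ORDER == 10 and PAGE_SHIFT == 12.
     */
    static_assert((1UL << 10 << 12) == 4UL << 20);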
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 55c5b48bc2..6f5e099486 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -159,7 +159,7 @@ int null_register_zoned_dev(struct nullb *nullb)
 	struct nullb_device *dev = nullb->dev;
 	struct request_queue *q = nullb->q;
 
-	disk_set_zoned(nullb->disk, BLK_ZONED_HM);
+	disk_set_zoned(nullb->disk);
 	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
 	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
 	blk_queue_chunk_sectors(q, dev->zone_size_sects);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 1e2596c5ef..12b5d53ec8 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -5332,7 +5332,7 @@ static void rbd_dev_release(struct device *dev)
 
 	if (need_put) {
 		destroy_workqueue(rbd_dev->task_wq);
-		ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+		ida_free(&rbd_dev_id_ida, rbd_dev->dev_id);
 	}
 
 	rbd_dev_free(rbd_dev);
@@ -5408,9 +5408,9 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
 		return NULL;
 
 	/* get an id and fill in device name */
-	rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
-					 minor_to_rbd_dev_id(1 << MINORBITS),
-					 GFP_KERNEL);
+	rbd_dev->dev_id = ida_alloc_max(&rbd_dev_id_ida,
+					minor_to_rbd_dev_id(1 << MINORBITS) - 1,
+					GFP_KERNEL);
 	if (rbd_dev->dev_id < 0)
 		goto fail_rbd_dev;
 
@@ -5431,7 +5431,7 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
 	return rbd_dev;
 
 fail_dev_id:
-	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+	ida_free(&rbd_dev_id_ida, rbd_dev->dev_id);
 fail_rbd_dev:
 	rbd_dev_free(rbd_dev);
 	return NULL;
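The ida conversions in null_blk and rbd above track the removal of the ida_simple_*() wrappers. The one behavioural subtlety: ida_simple_get() took an exclusive 'end' argument, while ida_alloc_max() takes an inclusive 'max', which is why rbd now passes minor_to_rbd_dev_id(1 << MINORBITS) - 1. A minimal sketch of the old-to-new mapping, with a hypothetical ID space and bound:

    #include <linux/idr.h>

    static DEFINE_IDA(example_ida);

    static int example_get_id(void)
    {
            /* old: ida_simple_get(&example_ida, 0, 128, GFP_KERNEL), end exclusive */
            /* new: max is inclusive, so the bound becomes 127 */
            return ida_alloc_max(&example_ida, 127, GFP_KERNEL);
    }

    static void example_put_id(int id)
    {
            /* old: ida_simple_remove(&example_ida, id); */
            ida_free(&example_ida, id);
    }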
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index b0550b6864..4044c369d2 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1006,10 +1006,10 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
 	msg.prio	= cpu_to_le16(req_get_ioprio(rq));
 
 	/*
 	 * We only support discards/WRITE_ZEROES with single segment for now.
 	 * See queue limits.
 	 */
-	if (req_op(rq) != REQ_OP_DISCARD)
+	if ((req_op(rq) != REQ_OP_DISCARD) && (req_op(rq) != REQ_OP_WRITE_ZEROES))
 		sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl);
 
 	if (sg_cnt == 0)
@@ -1362,6 +1362,8 @@ static void setup_request_queue(struct rnbd_clt_dev *dev,
 	blk_queue_write_cache(dev->queue,
 			      !!(rsp->cache_policy & RNBD_WRITEBACK),
 			      !!(rsp->cache_policy & RNBD_FUA));
+	blk_queue_max_write_zeroes_sectors(dev->queue,
+					   le32_to_cpu(rsp->max_write_zeroes_sectors));
 }
 
 static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev,
@@ -1567,8 +1569,8 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
 
 	dev = init_dev(sess, access_mode, pathname, nr_poll_queues);
 	if (IS_ERR(dev)) {
-		pr_err("map_device: failed to map device '%s' from session %s, can't initialize device, err: %ld\n",
-		       pathname, sess->sessname, PTR_ERR(dev));
+		pr_err("map_device: failed to map device '%s' from session %s, can't initialize device, err: %pe\n",
+		       pathname, sess->sessname, dev);
 		ret = PTR_ERR(dev);
 		goto put_sess;
 	}
@@ -1626,10 +1628,11 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
 	}
 
 	rnbd_clt_info(dev,
-		      "map_device: Device mapped as %s (nsectors: %llu, logical_block_size: %d, physical_block_size: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
+		      "map_device: Device mapped as %s (nsectors: %llu, logical_block_size: %d, physical_block_size: %d, max_write_zeroes_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
 		      dev->gd->disk_name, le64_to_cpu(rsp->nsectors),
 		      le16_to_cpu(rsp->logical_block_size),
 		      le16_to_cpu(rsp->physical_block_size),
+		      le32_to_cpu(rsp->max_write_zeroes_sectors),
 		      le32_to_cpu(rsp->max_discard_sectors),
 		      le32_to_cpu(rsp->discard_granularity),
 		      le32_to_cpu(rsp->discard_alignment),
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
index e32f8f2c86..f35be51d21 100644
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -128,7 +128,7 @@ enum rnbd_cache_policy {
  * @device_id:		device_id on server side to identify the device
  * @nsectors:		number of sectors in the usual 512b unit
  * @max_hw_sectors:	max hardware sectors in the usual 512b unit
- * @max_write_same_sectors: max sectors for WRITE SAME in the 512b unit
+ * @max_write_zeroes_sectors: max sectors for WRITE ZEROES in the 512b unit
  * @max_discard_sectors: max. sectors that can be discarded at once in 512b
  *			unit.
  * @discard_granularity: size of the internal discard allocation unit in bytes
@@ -145,7 +145,7 @@ struct rnbd_msg_open_rsp {
 	__le32		device_id;
 	__le64		nsectors;
 	__le32		max_hw_sectors;
-	__le32		max_write_same_sectors;
+	__le32		max_write_zeroes_sectors;
 	__le32		max_discard_sectors;
 	__le32		discard_granularity;
 	__le32		discard_alignment;
@@ -186,7 +186,7 @@ struct rnbd_msg_io {
  * @RNBD_OP_FLUSH:	   flush the volatile write cache
  * @RNBD_OP_DISCARD:	   discard sectors
  * @RNBD_OP_SECURE_ERASE: securely erase sectors
- * @RNBD_OP_WRITE_SAME:   write the same sectors many times
+ * @RNBD_OP_WRITE_ZEROES: write zeroes sectors
  *
 * @RNBD_F_SYNC:	   request is sync (sync write or read)
 * @RNBD_F_FUA:		   forced unit access
@@ -199,7 +199,7 @@ enum rnbd_io_flags {
 	RNBD_OP_FLUSH		= 2,
 	RNBD_OP_DISCARD		= 3,
 	RNBD_OP_SECURE_ERASE	= 4,
-	RNBD_OP_WRITE_SAME	= 5,
+	RNBD_OP_WRITE_ZEROES	= 5,
 
 	/* Flags */
 	RNBD_F_SYNC = 1<<(RNBD_OP_BITS + 0),
@@ -236,6 +236,9 @@ static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf)
 	case RNBD_OP_SECURE_ERASE:
 		bio_opf = REQ_OP_SECURE_ERASE;
 		break;
+	case RNBD_OP_WRITE_ZEROES:
+		bio_opf = REQ_OP_WRITE_ZEROES;
+		break;
 	default:
 		WARN(1, "Unknown RNBD type: %d (flags %d)\n",
 		     rnbd_op(rnbd_opf), rnbd_opf);
@@ -268,6 +271,9 @@ static inline u32 rq_to_rnbd_flags(struct request *rq)
 	case REQ_OP_SECURE_ERASE:
 		rnbd_opf = RNBD_OP_SECURE_ERASE;
 		break;
+	case REQ_OP_WRITE_ZEROES:
+		rnbd_opf = RNBD_OP_WRITE_ZEROES;
+		break;
 	case REQ_OP_FLUSH:
 		rnbd_opf = RNBD_OP_FLUSH;
 		break;
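Worth noting about the protocol change above: rnbd_msg_open_rsp swaps max_write_same_sectors for max_write_zeroes_sectors in place (same __le32, same position) and reuses opcode 5, so message sizes and layout on the wire are unchanged; only the meaning of that field and opcode differs between old and new peers. A layout guard of the following shape could pin that down at compile time; it is a hypothetical addition, not part of this patch:

    #include <linux/build_bug.h>
    #include <linux/stddef.h>

    /* the new field must keep the old one's slot, right after max_hw_sectors */
    static_assert(offsetof(struct rnbd_msg_open_rsp, max_write_zeroes_sectors) ==
                  offsetof(struct rnbd_msg_open_rsp, max_hw_sectors) + sizeof(__le32));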
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index ab78eab97d..3a0d5dcec6 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -136,8 +136,8 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
 
 	sess_dev = rnbd_get_sess_dev(dev_id, srv_sess);
 	if (IS_ERR(sess_dev)) {
-		pr_err_ratelimited("Got I/O request on session %s for unknown device id %d\n",
-				   srv_sess->sessname, dev_id);
+		pr_err_ratelimited("Got I/O request on session %s for unknown device id %d: %pe\n",
+				   srv_sess->sessname, dev_id, sess_dev);
 		err = -ENOTCONN;
 		goto err;
 	}
@@ -544,7 +544,8 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
 	rsp->max_segments = cpu_to_le16(bdev_max_segments(bdev));
 	rsp->max_hw_sectors =
 		cpu_to_le32(queue_max_hw_sectors(bdev_get_queue(bdev)));
-	rsp->max_write_same_sectors = 0;
+	rsp->max_write_zeroes_sectors =
+		cpu_to_le32(bdev_write_zeroes_sectors(bdev));
 	rsp->max_discard_sectors = cpu_to_le32(bdev_max_discard_sectors(bdev));
 	rsp->discard_granularity = cpu_to_le32(bdev_discard_granularity(bdev));
 	rsp->discard_alignment = cpu_to_le32(bdev_discard_alignment(bdev));
@@ -710,24 +711,24 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
 	full_path = rnbd_srv_get_full_path(srv_sess, open_msg->dev_name);
 	if (IS_ERR(full_path)) {
 		ret = PTR_ERR(full_path);
-		pr_err("Opening device '%s' for client %s failed, failed to get device full path, err: %d\n",
-		       open_msg->dev_name, srv_sess->sessname, ret);
+		pr_err("Opening device '%s' for client %s failed, failed to get device full path, err: %pe\n",
+		       open_msg->dev_name, srv_sess->sessname, full_path);
 		goto reject;
 	}
 
 	bdev_handle = bdev_open_by_path(full_path, open_flags, NULL, NULL);
 	if (IS_ERR(bdev_handle)) {
 		ret = PTR_ERR(bdev_handle);
-		pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %d\n",
-		       full_path, srv_sess->sessname, ret);
+		pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %pe\n",
+		       full_path, srv_sess->sessname, bdev_handle);
 		goto free_path;
 	}
 
 	srv_dev = rnbd_srv_get_or_create_srv_dev(bdev_handle->bdev, srv_sess,
 						 open_msg->access_mode);
 	if (IS_ERR(srv_dev)) {
-		pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %ld\n",
-		       full_path, srv_sess->sessname, PTR_ERR(srv_dev));
+		pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %pe\n",
+		       full_path, srv_sess->sessname, srv_dev);
 		ret = PTR_ERR(srv_dev);
 		goto blkdev_put;
 	}
@@ -737,8 +738,8 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
 					     open_msg->access_mode == RNBD_ACCESS_RO,
 					     srv_dev);
 	if (IS_ERR(srv_sess_dev)) {
-		pr_err("Opening device '%s' on session %s failed, creating sess_dev failed, err: %ld\n",
-		       full_path, srv_sess->sessname, PTR_ERR(srv_sess_dev));
+		pr_err("Opening device '%s' on session %s failed, creating sess_dev failed, err: %pe\n",
+		       full_path, srv_sess->sessname, srv_sess_dev);
 		ret = PTR_ERR(srv_sess_dev);
 		goto srv_dev_put;
 	}
@@ -819,7 +820,7 @@ static int __init rnbd_srv_init_module(void)
 	};
 	rtrs_ctx = rtrs_srv_open(&rtrs_ops, port_nr);
 	if (IS_ERR(rtrs_ctx)) {
-		pr_err("rtrs_srv_open(), err: %d\n", err);
+		pr_err("rtrs_srv_open(), err: %pe\n", rtrs_ctx);
 		return PTR_ERR(rtrs_ctx);
 	}
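The pr_err() changes in rnbd-srv.c above (and in rnbd-clt.c earlier) switch from printing PTR_ERR() with %d or %ld to handing the ERR_PTR itself to the %pe printk specifier. With CONFIG_SYMBOLIC_ERRNAME=y that prints the symbolic name (e.g. "-ENOENT") and otherwise falls back to the decimal errno, so the logs become more readable at no extra cost. A minimal sketch of the idiom, with a hypothetical helper:

    #include <linux/err.h>
    #include <linux/fs.h>

    static struct file *example_open(const char *path)
    {
            struct file *f = filp_open(path, O_RDONLY, 0);

            if (IS_ERR(f))
                    pr_err("failed to open '%s': %pe\n", path, f);  /* e.g. "-ENOENT" */
            return f;
    }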
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 83600b45e1..1dfb2e7789 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -36,7 +36,7 @@
 #include <linux/sched/mm.h>
 #include <linux/uaccess.h>
 #include <linux/cdev.h>
-#include <linux/io_uring.h>
+#include <linux/io_uring/cmd.h>
 #include <linux/blk-mq.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
@@ -250,7 +250,7 @@ static int ublk_dev_param_zoned_apply(struct ublk_device *ub)
 {
 	const struct ublk_param_zoned *p = &ub->params.zoned;
 
-	disk_set_zoned(ub->ub_disk, BLK_ZONED_HM);
+	disk_set_zoned(ub->ub_disk);
 	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
 	blk_queue_required_elevator_features(ub->ub_disk->queue,
 					     ELEVATOR_F_ZBD_SEQ_WRITE);
@@ -893,12 +893,9 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
 	 */
 	if (ublk_need_map_req(req)) {
 		struct iov_iter iter;
-		struct iovec iov;
 		const int dir = ITER_DEST;
 
-		import_single_range(dir, u64_to_user_ptr(io->addr), rq_bytes,
-				&iov, &iter);
-
+		import_ubuf(dir, u64_to_user_ptr(io->addr), rq_bytes, &iter);
 		return ublk_copy_user_pages(req, 0, &iter, dir);
 	}
 	return rq_bytes;
@@ -915,13 +912,11 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
 
 	if (ublk_need_unmap_req(req)) {
 		struct iov_iter iter;
-		struct iovec iov;
 		const int dir = ITER_SOURCE;
 
 		WARN_ON_ONCE(io->res > rq_bytes);
 
-		import_single_range(dir, u64_to_user_ptr(io->addr), io->res,
-				&iov, &iter);
+		import_ubuf(dir, u64_to_user_ptr(io->addr), io->res, &iter);
 		return ublk_copy_user_pages(req, 0, &iter, dir);
 	}
 	return rq_bytes;
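The ublk hunks above replace import_single_range(), which was removed upstream, with import_ubuf(). Both validate a single user buffer and prime an iov_iter, but import_ubuf() builds the lighter ITER_UBUF iterator and needs no on-stack struct iovec. A minimal sketch of the conversion, with a hypothetical helper:

    #include <linux/uio.h>

    static ssize_t example_copy_in(void __user *ubuf, void *dst, size_t len)
    {
            struct iov_iter iter;
            int ret;

            /* was: struct iovec iov;
             *      import_single_range(ITER_SOURCE, ubuf, len, &iov, &iter);
             */
            ret = import_ubuf(ITER_SOURCE, ubuf, len, &iter);
            if (ret)
                    return ret;
            return copy_from_iter(dst, len, &iter);
    }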
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2c846eed5a..2bf14a0e28 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -367,8 +367,6 @@ static void virtblk_done(struct virtqueue *vq)
 			blk_mq_complete_request(req);
 			req_done = true;
 		}
-		if (unlikely(virtqueue_is_broken(vq)))
-			break;
 	} while (!virtqueue_enable_cb(vq));
 
 	/* In case queue is stopped waiting for more buffers. */
@@ -722,52 +720,15 @@ fail_report:
 	return ret;
 }
 
-static void virtblk_revalidate_zones(struct virtio_blk *vblk)
-{
-	u8 model;
-
-	virtio_cread(vblk->vdev, struct virtio_blk_config,
-		     zoned.model, &model);
-	switch (model) {
-	default:
-		dev_err(&vblk->vdev->dev, "unknown zone model %d\n", model);
-		fallthrough;
-	case VIRTIO_BLK_Z_NONE:
-	case VIRTIO_BLK_Z_HA:
-		disk_set_zoned(vblk->disk, BLK_ZONED_NONE);
-		return;
-	case VIRTIO_BLK_Z_HM:
-		WARN_ON_ONCE(!vblk->zone_sectors);
-		if (!blk_revalidate_disk_zones(vblk->disk, NULL))
-			set_capacity_and_notify(vblk->disk, 0);
-	}
-}
-
 static int virtblk_probe_zoned_device(struct virtio_device *vdev,
 				       struct virtio_blk *vblk,
 				       struct request_queue *q)
 {
 	u32 v, wg;
-	u8 model;
-
-	virtio_cread(vdev, struct virtio_blk_config,
-		     zoned.model, &model);
-
-	switch (model) {
-	case VIRTIO_BLK_Z_NONE:
-	case VIRTIO_BLK_Z_HA:
-		/* Present the host-aware device as non-zoned */
-		return 0;
-	case VIRTIO_BLK_Z_HM:
-		break;
-	default:
-		dev_err(&vdev->dev, "unsupported zone model %d\n", model);
-		return -EINVAL;
-	}
 
 	dev_dbg(&vdev->dev, "probing host-managed zoned device\n");
 
-	disk_set_zoned(vblk->disk, BLK_ZONED_HM);
+	disk_set_zoned(vblk->disk);
 	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
 
 	virtio_cread(vdev, struct virtio_blk_config,
@@ -839,23 +800,12 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
  */
 #define virtblk_report_zones NULL
 
-static inline void virtblk_revalidate_zones(struct virtio_blk *vblk)
-{
-}
-
 static inline int virtblk_probe_zoned_device(struct virtio_device *vdev,
 			struct virtio_blk *vblk, struct request_queue *q)
 {
-	u8 model;
-
-	virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model);
-	if (model == VIRTIO_BLK_Z_HM) {
-		dev_err(&vdev->dev,
-			"virtio_blk: zoned devices are not supported");
-		return -EOPNOTSUPP;
-	}
-
-	return 0;
+	dev_err(&vdev->dev,
+		"virtio_blk: zoned devices are not supported");
+	return -EOPNOTSUPP;
 }
 #endif /* CONFIG_BLK_DEV_ZONED */
 
@@ -1005,7 +955,6 @@ static void virtblk_config_changed_work(struct work_struct *work)
 	struct virtio_blk *vblk =
 		container_of(work, struct virtio_blk, config_work);
 
-	virtblk_revalidate_zones(vblk);
 	virtblk_update_capacity(vblk, true);
 }
 
@@ -1570,9 +1519,26 @@ static int virtblk_probe(struct virtio_device *vdev)
 	 * placed after the virtio_device_ready() call above.
 	 */
 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
-		err = virtblk_probe_zoned_device(vdev, vblk, q);
-		if (err)
+		u8 model;
+
+		virtio_cread(vdev, struct virtio_blk_config, zoned.model,
+			     &model);
+		switch (model) {
+		case VIRTIO_BLK_Z_NONE:
+		case VIRTIO_BLK_Z_HA:
+			/* Present the host-aware device as non-zoned */
+			break;
+		case VIRTIO_BLK_Z_HM:
+			err = virtblk_probe_zoned_device(vdev, vblk, q);
+			if (err)
+				goto out_cleanup_disk;
+			break;
+		default:
+			dev_err(&vdev->dev, "unsupported zone model %d\n",
+				model);
+			err = -EINVAL;
 			goto out_cleanup_disk;
+		}
 	}
 
 	err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 5ff50e76ce..1432c83183 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -132,7 +132,7 @@ struct blkif_x86_32_request {
 struct blkif_x86_64_request_rw {
 	uint8_t        nr_segments;  /* number of segments                   */
 	blkif_vdev_t   handle;       /* only for read/write requests         */
-	uint32_t       _pad1;        /* offsetof(blkif_reqest..,u.rw.id)==8  */
+	uint32_t       _pad1;        /* offsetof(blkif_request..,u.rw.id)==8 */
 	uint64_t       id;
 	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
 	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index 0386b7da02..7b29cce60a 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -59,8 +59,8 @@ config ZRAM_WRITEBACK
 	bool "Write back incompressible or idle page to backing device"
 	depends on ZRAM
 	help
-	  With incompressible page, there is no memory saving to keep it
-	  in memory. Instead, write it out to backing device.
+	  This lets zram entries (incompressible or idle pages) be written
+	  back to a backing device, helping save memory.
 
 	  For this feature, admin should set up backing device via
 	  /sys/block/zramX/backing_dev.
@@ -69,9 +69,18 @@ config ZRAM_WRITEBACK
 	  See Documentation/admin-guide/blockdev/zram.rst for more information.
 
+config ZRAM_TRACK_ENTRY_ACTIME
+	bool "Track access time of zram entries"
+	depends on ZRAM
+	help
+	  With this feature zram tracks access time of every stored
+	  entry (page), which can be used for a more fine grained IDLE
+	  pages writeback.
+
 config ZRAM_MEMORY_TRACKING
 	bool "Track zRam block status"
 	depends on ZRAM && DEBUG_FS
+	select ZRAM_TRACK_ENTRY_ACTIME
 	help
 	  With this feature, admin can track the state of allocated blocks
 	  of zRAM. Admin could see the information via
@@ -86,4 +95,4 @@ config ZRAM_MULTI_COMP
 	  This will enable multi-compression streams, so that ZRAM can
 	  re-compress pages using a potentially slower but more effective
 	  compression algorithm. Note, that IDLE page recompression
-	  requires ZRAM_MEMORY_TRACKING.
+	  requires ZRAM_TRACK_ENTRY_ACTIME.
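Practical effect of the zram Kconfig change above: access-time tracking gets its own switch, and ZRAM_MEMORY_TRACKING now selects it, so age-based idle marking (echo <age_in_seconds> > /sys/block/zram0/idle) no longer requires the debugfs-only tracking option. An illustrative .config fragment using the new split; the chosen values are an example, not taken from this patch:

    CONFIG_ZRAM=y
    CONFIG_ZRAM_WRITEBACK=y
    # idle-page age tracking without DEBUG_FS / ZRAM_MEMORY_TRACKING
    CONFIG_ZRAM_TRACK_ENTRY_ACTIME=y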
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d77d3664ca..6772e0c654 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -174,6 +174,14 @@ static inline u32 zram_get_priority(struct zram *zram, u32 index)
 	return prio & ZRAM_COMP_PRIORITY_MASK;
 }
 
+static void zram_accessed(struct zram *zram, u32 index)
+{
+	zram_clear_flag(zram, index, ZRAM_IDLE);
+#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
+	zram->table[index].ac_time = ktime_get_boottime();
+#endif
+}
+
 static inline void update_used_max(struct zram *zram,
 					const unsigned long pages)
 {
@@ -293,8 +301,9 @@ static void mark_idle(struct zram *zram, ktime_t cutoff)
 		zram_slot_lock(zram, index);
 		if (zram_allocated(zram, index) &&
 				!zram_test_flag(zram, index, ZRAM_UNDER_WB)) {
-#ifdef CONFIG_ZRAM_MEMORY_TRACKING
-			is_idle = !cutoff || ktime_after(cutoff, zram->table[index].ac_time);
+#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
+			is_idle = !cutoff || ktime_after(cutoff,
+							 zram->table[index].ac_time);
 #endif
 			if (is_idle)
 				zram_set_flag(zram, index, ZRAM_IDLE);
@@ -317,7 +326,7 @@ static ssize_t idle_store(struct device *dev,
 		 */
 		u64 age_sec;
 
-		if (IS_ENABLED(CONFIG_ZRAM_MEMORY_TRACKING) && !kstrtoull(buf, 0, &age_sec))
+		if (IS_ENABLED(CONFIG_ZRAM_TRACK_ENTRY_ACTIME) && !kstrtoull(buf, 0, &age_sec))
 			cutoff_time = ktime_sub(ktime_get_boottime(),
 					ns_to_ktime(age_sec * NSEC_PER_SEC));
 		else
@@ -841,12 +850,6 @@ static void zram_debugfs_destroy(void)
 	debugfs_remove_recursive(zram_debugfs_root);
 }
 
-static void zram_accessed(struct zram *zram, u32 index)
-{
-	zram_clear_flag(zram, index, ZRAM_IDLE);
-	zram->table[index].ac_time = ktime_get_boottime();
-}
-
 static ssize_t read_block_state(struct file *file, char __user *buf,
 				size_t count, loff_t *ppos)
 {
@@ -930,10 +933,6 @@ static void zram_debugfs_unregister(struct zram *zram)
 #else
 static void zram_debugfs_create(void) {};
 static void zram_debugfs_destroy(void) {};
-static void zram_accessed(struct zram *zram, u32 index)
-{
-	zram_clear_flag(zram, index, ZRAM_IDLE);
-};
 static void zram_debugfs_register(struct zram *zram) {};
 static void zram_debugfs_unregister(struct zram *zram) {};
 #endif
@@ -1254,7 +1253,7 @@ static void zram_free_page(struct zram *zram, size_t index)
 {
 	unsigned long handle;
 
-#ifdef CONFIG_ZRAM_MEMORY_TRACKING
+#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
 	zram->table[index].ac_time = 0;
 #endif
 	if (zram_test_flag(zram, index, ZRAM_IDLE))
@@ -1322,9 +1321,9 @@ static int zram_read_from_zspool(struct zram *zram, struct page *page,
 		void *mem;
 
 		value = handle ? zram_get_element(zram, index) : 0;
-		mem = kmap_atomic(page);
+		mem = kmap_local_page(page);
 		zram_fill_page(mem, PAGE_SIZE, value);
-		kunmap_atomic(mem);
+		kunmap_local(mem);
 		return 0;
 	}
 
@@ -1337,14 +1336,14 @@ static int zram_read_from_zspool(struct zram *zram, struct page *page,
 
 	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
 	if (size == PAGE_SIZE) {
-		dst = kmap_atomic(page);
+		dst = kmap_local_page(page);
 		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(dst);
+		kunmap_local(dst);
 		ret = 0;
 	} else {
-		dst = kmap_atomic(page);
+		dst = kmap_local_page(page);
 		ret = zcomp_decompress(zstrm, src, size, dst);
-		kunmap_atomic(dst);
+		kunmap_local(dst);
 		zcomp_stream_put(zram->comps[prio]);
 	}
 	zs_unmap_object(zram->mem_pool, handle);
@@ -1417,21 +1416,21 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 	unsigned long element = 0;
 	enum zram_pageflags flags = 0;
 
-	mem = kmap_atomic(page);
+	mem = kmap_local_page(page);
 	if (page_same_filled(mem, &element)) {
-		kunmap_atomic(mem);
+		kunmap_local(mem);
 		/* Free memory associated with this sector now. */
 		flags = ZRAM_SAME;
 		atomic64_inc(&zram->stats.same_pages);
 		goto out;
 	}
-	kunmap_atomic(mem);
+	kunmap_local(mem);
 
 compress_again:
 	zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
-	src = kmap_atomic(page);
+	src = kmap_local_page(page);
 	ret = zcomp_compress(zstrm, src, &comp_len);
-	kunmap_atomic(src);
+	kunmap_local(src);
 
 	if (unlikely(ret)) {
 		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
@@ -1495,10 +1494,10 @@ compress_again:
 
 	src = zstrm->buffer;
 	if (comp_len == PAGE_SIZE)
-		src = kmap_atomic(page);
+		src = kmap_local_page(page);
 	memcpy(dst, src, comp_len);
 	if (comp_len == PAGE_SIZE)
-		kunmap_atomic(src);
+		kunmap_local(src);
 
 	zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
 	zs_unmap_object(zram->mem_pool, handle);
@@ -1615,9 +1614,9 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
 
 		num_recomps++;
 		zstrm = zcomp_stream_get(zram->comps[prio]);
-		src = kmap_atomic(page);
+		src = kmap_local_page(page);
 		ret = zcomp_compress(zstrm, src, &comp_len_new);
-		kunmap_atomic(src);
+		kunmap_local(src);
 
 		if (ret) {
 			zcomp_stream_put(zram->comps[prio]);
@@ -2227,7 +2226,6 @@ static int zram_add(void)
 					ZRAM_LOGICAL_BLOCK_SIZE);
 	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
 	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
-	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
 	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
 
 	/*
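The zram_drv.c hunks above also convert every kmap_atomic()/kunmap_atomic() pair to kmap_local_page()/kunmap_local(). Local kmaps give the same cheap, CPU-local temporary mapping but do not disable pagefaults or preemption, so the mapped section is allowed to sleep; unmaps still have to nest in reverse order. A minimal sketch of the conversion, with a hypothetical helper:

    #include <linux/highmem.h>

    static void example_zero_page(struct page *page)
    {
            /* was: void *mem = kmap_atomic(page); */
            void *mem = kmap_local_page(page);

            memset(mem, 0, PAGE_SIZE);

            /* was: kunmap_atomic(mem); */
            kunmap_local(mem);
    }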
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index d090753f97..3b94d12f41 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -69,7 +69,7 @@ struct zram_table_entry {
 		unsigned long element;
 	};
 	unsigned long flags;
-#ifdef CONFIG_ZRAM_MEMORY_TRACKING
+#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
 	ktime_t ac_time;
 #endif
 };