author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:57 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:57 +0000
commit     dc50eab76b709d68175a358d6e23a5a3890764d3 (patch)
tree       c754d0390db060af0213ff994f0ac310e4cfd6e9 /io_uring/rw.c
parent     Adding debian version 6.6.15-2. (diff)
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'io_uring/rw.c')
-rw-r--r--   io_uring/rw.c   170
1 file changed, 145 insertions, 25 deletions
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 0a0c1c9db0..9394bf83e8 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -18,6 +18,7 @@
 #include "opdef.h"
 #include "kbuf.h"
 #include "rsrc.h"
+#include "poll.h"
 #include "rw.h"
 
 struct io_rw {
@@ -83,18 +84,6 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	/* used for fixed read/write too - just read unconditionally */
 	req->buf_index = READ_ONCE(sqe->buf_index);
 
-	if (req->opcode == IORING_OP_READ_FIXED ||
-	    req->opcode == IORING_OP_WRITE_FIXED) {
-		struct io_ring_ctx *ctx = req->ctx;
-		u16 index;
-
-		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
-			return -EFAULT;
-		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
-		req->imu = ctx->user_bufs[index];
-		io_req_set_rsrc_node(req, ctx, 0);
-	}
-
 	ioprio = READ_ONCE(sqe->ioprio);
 	if (ioprio) {
 		ret = ioprio_check_cap(ioprio);
@@ -110,16 +99,66 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	rw->addr = READ_ONCE(sqe->addr);
 	rw->len = READ_ONCE(sqe->len);
 	rw->flags = READ_ONCE(sqe->rw_flags);
+	return 0;
+}
+
+int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	int ret;
+
+	ret = io_prep_rw(req, sqe);
+	if (unlikely(ret))
+		return ret;
 
-	/* Have to do this validation here, as this is in io_read() rw->len might
-	 * have chanaged due to buffer selection
+	/*
+	 * Have to do this validation here, as this is in io_read() rw->len
+	 * might have chanaged due to buffer selection
 	 */
-	if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
-		ret = io_iov_buffer_select_prep(req);
-		if (ret)
-			return ret;
-	}
+	if (req->flags & REQ_F_BUFFER_SELECT)
+		return io_iov_buffer_select_prep(req);
+
+	return 0;
+}
+
+int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	u16 index;
+	int ret;
+
+	ret = io_prep_rw(req, sqe);
+	if (unlikely(ret))
+		return ret;
+
+	if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+		return -EFAULT;
+	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
+	req->imu = ctx->user_bufs[index];
+	io_req_set_rsrc_node(req, ctx, 0);
+	return 0;
+}
+
+/*
+ * Multishot read is prepared just like a normal read/write request, only
+ * difference is that we set the MULTISHOT flag.
+ */
+int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+	int ret;
+
+	/* must be used with provided buffers */
+	if (!(req->flags & REQ_F_BUFFER_SELECT))
+		return -EINVAL;
+
+	ret = io_prep_rw(req, sqe);
+	if (unlikely(ret))
+		return ret;
+
+	if (rw->addr || rw->len)
+		return -EINVAL;
+
+	req->flags |= REQ_F_APOLL_MULTISHOT;
 	return 0;
 }
 
@@ -388,8 +427,7 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
 	buf = u64_to_user_ptr(rw->addr);
 	sqe_len = rw->len;
 
-	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE ||
-	    (req->flags & REQ_F_BUFFER_SELECT)) {
+	if (!io_issue_defs[opcode].vectored || req->flags & REQ_F_BUFFER_SELECT) {
 		if (io_do_buffer_select(req)) {
 			buf = io_buffer_select(req, &sqe_len, issue_flags);
 			if (!buf)
@@ -527,6 +565,9 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 {
 	if (!force && !io_cold_defs[req->opcode].prep_async)
 		return 0;
+	/* opcode type doesn't need async data */
+	if (!io_cold_defs[req->opcode].async_size)
+		return 0;
 	if (!req_has_async_data(req)) {
 		struct io_async_rw *iorw;
 
@@ -712,7 +753,7 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
 	return 0;
 }
 
-int io_read(struct io_kiocb *req, unsigned int issue_flags)
+static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 	struct io_rw_state __s, *s = &__s;
@@ -780,8 +821,11 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
 	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
 		req->flags &= ~REQ_F_REISSUE;
-		/* if we can poll, just do that */
-		if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
+		/*
+		 * If we can poll, just do that. For a vectored read, we'll
+		 * need to copy state first.
+		 */
+		if (file_can_poll(req->file) && !io_issue_defs[req->opcode].vectored)
 			return -EAGAIN;
 		/* IOPOLL retry should happen for io-wq threads */
 		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
@@ -857,7 +901,83 @@ done:
 	/* it's faster to check here then delegate to kfree */
 	if (iovec)
 		kfree(iovec);
-	return kiocb_done(req, ret, issue_flags);
+	return ret;
+}
+
+int io_read(struct io_kiocb *req, unsigned int issue_flags)
+{
+	int ret;
+
+	ret = __io_read(req, issue_flags);
+	if (ret >= 0)
+		return kiocb_done(req, ret, issue_flags);
+
+	return ret;
+}
+
+int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+	unsigned int cflags = 0;
+	int ret;
+
+	/*
+	 * Multishot MUST be used on a pollable file
+	 */
+	if (!file_can_poll(req->file))
+		return -EBADFD;
+
+	ret = __io_read(req, issue_flags);
+
+	/*
+	 * If we get -EAGAIN, recycle our buffer and just let normal poll
+	 * handling arm it.
+	 */
+	if (ret == -EAGAIN) {
+		/*
+		 * Reset rw->len to 0 again to avoid clamping future mshot
+		 * reads, in case the buffer size varies.
+		 */
+		if (io_kbuf_recycle(req, issue_flags))
+			rw->len = 0;
+		return -EAGAIN;
+	}
+
+	/*
+	 * Any successful return value will keep the multishot read armed.
+	 */
+	if (ret > 0) {
+		/*
+		 * Put our buffer and post a CQE. If we fail to post a CQE, then
+		 * jump to the termination path. This request is then done.
+		 */
+		cflags = io_put_kbuf(req, issue_flags);
+		rw->len = 0;		/* similarly to above, reset len to 0 */
+
+		if (io_fill_cqe_req_aux(req,
+					issue_flags & IO_URING_F_COMPLETE_DEFER,
+					ret, cflags | IORING_CQE_F_MORE)) {
+			if (issue_flags & IO_URING_F_MULTISHOT) {
+				/*
+				 * Force retry, as we might have more data to
+				 * be read and otherwise it won't get retried
+				 * until (if ever) another poll is triggered.
+				 */
+				io_poll_multishot_retry(req);
+				return IOU_ISSUE_SKIP_COMPLETE;
+			}
+			return -EAGAIN;
+		}
+	}
+
+	/*
+	 * Either an error, or we've hit overflow posting the CQE. For any
+	 * multishot request, hitting overflow will terminate it.
+	 */
+	io_req_set_res(req, ret, cflags);
+	if (issue_flags & IO_URING_F_MULTISHOT)
+		return IOU_STOP_MULTISHOT;
+	return IOU_OK;
 }
 
 int io_write(struct io_kiocb *req, unsigned int issue_flags)
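For context on how the io_read_mshot()/io_read_mshot_prep() path added above is driven from userspace: the example below is not part of the commit and is only a minimal sketch, assuming a 6.7+ kernel with the multishot read opcode and liburing 2.6 or newer providing io_uring_prep_read_multishot(); the buffer-group id, buffer count, and sizes are arbitrary illustration values. It mirrors the constraints visible in the patch: buffer selection is mandatory, the sqe addr/len stay zero, and each completion carries IORING_CQE_F_MORE for as long as the request stays armed.

/*
 * Minimal userspace sketch (assumed: liburing >= 2.6, kernel >= 6.7).
 * Reads repeatedly from one pollable fd with a single multishot read
 * request backed by a provided-buffer ring.
 */
#include <liburing.h>
#include <stdio.h>
#include <unistd.h>

#define BGID		1	/* provided-buffer group id (arbitrary) */
#define NR_BUFS		8
#define BUF_SIZE	4096

static int read_multishot(int fd)
{
	static char bufs[NR_BUFS][BUF_SIZE];
	struct io_uring_buf_ring *br;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	struct io_uring ring;
	unsigned int bid;
	int i, ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0)
		return ret;

	/* multishot read requires buffer selection (REQ_F_BUFFER_SELECT) */
	br = io_uring_setup_buf_ring(&ring, NR_BUFS, BGID, 0, &ret);
	if (!br)
		goto out;
	for (i = 0; i < NR_BUFS; i++)
		io_uring_buf_ring_add(br, bufs[i], BUF_SIZE, i,
				      io_uring_buf_ring_mask(NR_BUFS), i);
	io_uring_buf_ring_advance(br, NR_BUFS);

	/* addr/len stay 0: io_read_mshot_prep() rejects non-zero values */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read_multishot(sqe, fd, 0, 0, BGID);
	io_uring_submit(&ring);

	/* each CQE keeps IORING_CQE_F_MORE while the request stays armed */
	for (;;) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret < 0)
			break;
		if (cqe->res <= 0 || !(cqe->flags & IORING_CQE_F_MORE)) {
			ret = cqe->res;
			io_uring_cqe_seen(&ring, cqe);
			break;
		}
		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
		printf("got %d bytes in buffer %u\n", cqe->res, bid);
		/* hand the consumed buffer back to the ring for reuse */
		io_uring_buf_ring_add(br, bufs[bid], BUF_SIZE, bid,
				      io_uring_buf_ring_mask(NR_BUFS), 0);
		io_uring_buf_ring_advance(br, 1);
		io_uring_cqe_seen(&ring, cqe);
	}
out:
	io_uring_queue_exit(&ring);
	return ret;
}

int main(void)
{
	/* stdin is pollable when fed from a pipe or terminal */
	return read_multishot(STDIN_FILENO) < 0;
}

Once a completion arrives without IORING_CQE_F_MORE (an error, EOF, or CQ overflow, i.e. the IOU_STOP_MULTISHOT path in the patch), the request is finished and has to be re-armed with a new sqe.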