Diffstat (limited to 'io_uring/net.c')
-rw-r--r--  io_uring/net.c  105
1 file changed, 58 insertions(+), 47 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index dbabe0058..4afb475d4 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -242,27 +242,42 @@ static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
int ret;
- if (copy_from_user(msg, sr->umsg, sizeof(*sr->umsg)))
+ if (!user_access_begin(sr->umsg, sizeof(*sr->umsg)))
return -EFAULT;
+ ret = -EFAULT;
+ unsafe_get_user(msg->msg_name, &sr->umsg->msg_name, ua_end);
+ unsafe_get_user(msg->msg_namelen, &sr->umsg->msg_namelen, ua_end);
+ unsafe_get_user(msg->msg_iov, &sr->umsg->msg_iov, ua_end);
+ unsafe_get_user(msg->msg_iovlen, &sr->umsg->msg_iovlen, ua_end);
+ unsafe_get_user(msg->msg_control, &sr->umsg->msg_control, ua_end);
+ unsafe_get_user(msg->msg_controllen, &sr->umsg->msg_controllen, ua_end);
+ msg->msg_flags = 0;
+
if (req->flags & REQ_F_BUFFER_SELECT) {
if (msg->msg_iovlen == 0) {
sr->len = iomsg->fast_iov[0].iov_len = 0;
iomsg->fast_iov[0].iov_base = NULL;
iomsg->free_iov = NULL;
} else if (msg->msg_iovlen > 1) {
- return -EINVAL;
+ ret = -EINVAL;
+ goto ua_end;
} else {
- if (copy_from_user(iomsg->fast_iov, msg->msg_iov,
- sizeof(*msg->msg_iov)))
- return -EFAULT;
+ /* we only need the length for provided buffers */
+ if (!access_ok(&msg->msg_iov[0].iov_len, sizeof(__kernel_size_t)))
+ goto ua_end;
+ unsafe_get_user(iomsg->fast_iov[0].iov_len,
+ &msg->msg_iov[0].iov_len, ua_end);
sr->len = iomsg->fast_iov[0].iov_len;
iomsg->free_iov = NULL;
}
-
- return 0;
+ ret = 0;
+ua_end:
+ user_access_end();
+ return ret;
}
+ user_access_end();
iomsg->free_iov = iomsg->fast_iov;
ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV,
&iomsg->free_iov, &iomsg->msg.msg_iter, false);
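
The hunk above replaces a single bulk copy_from_user() of the msghdr with a
user_access_begin() section and per-field unsafe_get_user() calls, so the
user-access check is paid once for the whole struct rather than per copy.
A minimal sketch of the pattern (not the full function; kernel context and
linux/uaccess.h assumed):

    static int copy_hdr_sketch(struct user_msghdr __user *umsg,
                               struct user_msghdr *msg)
    {
            int ret = -EFAULT;

            if (!user_access_begin(umsg, sizeof(*umsg)))
                    return -EFAULT;
            /* each fetch jumps to ua_end on fault, no per-field check */
            unsafe_get_user(msg->msg_name, &umsg->msg_name, ua_end);
            unsafe_get_user(msg->msg_namelen, &umsg->msg_namelen, ua_end);
            ret = 0;
    ua_end:
            user_access_end();      /* must pair with user_access_begin() */
            return ret;
    }

Note the separate access_ok() before reading msg->msg_iov[0].iov_len in the
provided-buffer branch: the iovec lives outside the range validated by
user_access_begin(), so its length field needs its own check before
unsafe_get_user() may touch it.
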
@@ -391,6 +406,17 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
+static void io_req_msg_cleanup(struct io_kiocb *req,
+ struct io_async_msghdr *kmsg,
+ unsigned int issue_flags)
+{
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+ /* fast path, check for non-NULL to avoid function call */
+ if (kmsg->free_iov)
+ kfree(kmsg->free_iov);
+ io_netmsg_recycle(req, issue_flags);
+}
+
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
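
io_req_msg_cleanup() consolidates the completion-side teardown that
io_sendmsg (below) and io_recvmsg (further down) previously open-coded.
kfree(NULL) is already a no-op; the NULL check exists only to skip the
function call on the common fast path, as the comment notes. At each
converted call site the change reduces to:

    /* before: open-coded at every completion point */
    if (kmsg->free_iov)
            kfree(kmsg->free_iov);
    req->flags &= ~REQ_F_NEED_CLEANUP;
    io_netmsg_recycle(req, issue_flags);

    /* after: one helper, same fast path */
    io_req_msg_cleanup(req, kmsg, issue_flags);
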
@@ -433,18 +459,14 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
kmsg->msg.msg_controllen = 0;
kmsg->msg.msg_control = NULL;
sr->done_io += ret;
- req->flags |= REQ_F_PARTIAL_IO;
+ req->flags |= REQ_F_BL_NO_RECYCLE;
return io_setup_async_msg(req, kmsg, issue_flags);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req);
}
- /* fast path, check for non-NULL to avoid function call */
- if (kmsg->free_iov)
- kfree(kmsg->free_iov);
- req->flags &= ~REQ_F_NEED_CLEANUP;
- io_netmsg_recycle(req, issue_flags);
+ io_req_msg_cleanup(req, kmsg, issue_flags);
if (ret >= 0)
ret += sr->done_io;
else if (sr->done_io)
@@ -512,7 +534,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
sr->len -= ret;
sr->buf += ret;
sr->done_io += ret;
- req->flags |= REQ_F_PARTIAL_IO;
+ req->flags |= REQ_F_BL_NO_RECYCLE;
return io_setup_async_addr(req, &__address, issue_flags);
}
if (ret == -ERESTARTSYS)
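
This hunk, and the matching one-line changes in io_recvmsg, io_recv,
io_send_zc and io_sendmsg_zc further down, swap REQ_F_PARTIAL_IO for
REQ_F_BL_NO_RECYCLE on the short-transfer retry paths. The new flag names
the actual requirement: a provided buffer that already holds partial data
must not be handed back to the buffer ring. A hedged sketch of the gate
such a flag feeds (the real check lives in the kbuf code and may differ in
detail; io_kbuf_may_recycle is a hypothetical name):

    static inline bool io_kbuf_may_recycle(struct io_kiocb *req)
    {
            /* partial data already consumed from this buffer */
            if (req->flags & REQ_F_BL_NO_RECYCLE)
                    return false;
            return true;
    }
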
@@ -657,6 +679,7 @@ static inline void io_recv_prep_retry(struct io_kiocb *req)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ req->flags &= ~REQ_F_BL_EMPTY;
sr->done_io = 0;
sr->len = 0; /* get from the provided buffer */
req->buf_index = sr->buf_group;
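
Clearing REQ_F_BL_EMPTY before re-arming the retry discards a stale
"buffer ring looked empty" hint, so the next buffer selection re-reads the
ring state instead of trusting the previous pass. A hedged sketch of how
such a hint is typically consumed (an assumption, not shown in this diff):

    /* stop inline multishot retries once the last selection
     * already drained the provided-buffer ring */
    if (req->flags & REQ_F_BL_EMPTY)
            goto finish;    /* hypothetical label in the consumer */
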
@@ -675,30 +698,22 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
unsigned int cflags;
cflags = io_put_kbuf(req, issue_flags);
- if (msg->msg_inq && msg->msg_inq != -1)
+ if (msg->msg_inq > 0)
cflags |= IORING_CQE_F_SOCK_NONEMPTY;
- if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
- io_req_set_res(req, *ret, cflags);
- *ret = IOU_OK;
- return true;
- }
-
- if (mshot_finished)
- goto finish;
-
/*
* Fill CQE for this receive and see if we should keep trying to
* receive from this socket.
*/
- if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+ if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
+ io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
*ret, cflags | IORING_CQE_F_MORE)) {
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
io_recv_prep_retry(req);
/* Known not-empty or unknown state, retry */
- if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1) {
+ if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq < 0) {
if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
return false;
/* mshot retries exceeded, force a requeue */
@@ -711,8 +726,8 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
*ret = -EAGAIN;
return true;
}
- /* Otherwise stop multishot but use the current result. */
-finish:
+
+ /* Finish the request / stop multishot. */
io_req_set_res(req, *ret, cflags);
if (issue_flags & IO_URING_F_MULTISHOT)
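
The io_recv_finish() rework above folds the single-shot and
finished-multishot exits into one shared tail and standardizes on msg_inq
as a tri-state: negative means "unknown", zero means "known empty",
positive means "bytes still queued". The multishot retry condition then
condenses to:

    /* restatement of the retry test after this hunk */
    static bool sock_worth_retrying(int msg_inq)
    {
            /* <0 (unknown) and >0 (nonempty) both retry; only a
             * known-empty socket (0) stops the multishot loop */
            return msg_inq != 0;
    }
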
@@ -833,6 +848,10 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
(sr->flags & IORING_RECVSEND_POLL_FIRST))
return io_setup_async_msg(req, kmsg, issue_flags);
+ flags = sr->msg_flags;
+ if (force_nonblock)
+ flags |= MSG_DONTWAIT;
+
retry_multishot:
if (io_do_buffer_select(req)) {
void __user *buf;
@@ -853,10 +872,6 @@ retry_multishot:
iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
}
- flags = sr->msg_flags;
- if (force_nonblock)
- flags |= MSG_DONTWAIT;
-
kmsg->msg.msg_get_inq = 1;
kmsg->msg.msg_inq = -1;
if (req->flags & REQ_F_APOLL_MULTISHOT) {
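
This hunk pair, like the matching io_recv() changes below, hoists the
flags setup above the retry_multishot label: the value depends only on
sr->msg_flags and force_nonblock, neither of which changes across retries,
so there is no reason to recompute it on every loop iteration.

    /* loop-invariant, now computed once before the retry label */
    flags = sr->msg_flags;
    if (force_nonblock)
            flags |= MSG_DONTWAIT;  /* nonblocking issue must not sleep */

    retry_multishot:
            ...
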
@@ -882,7 +897,7 @@ retry_multishot:
}
if (ret > 0 && io_net_retry(sock, flags)) {
sr->done_io += ret;
- req->flags |= REQ_F_PARTIAL_IO;
+ req->flags |= REQ_F_BL_NO_RECYCLE;
return io_setup_async_msg(req, kmsg, issue_flags);
}
if (ret == -ERESTARTSYS)
@@ -902,13 +917,9 @@ retry_multishot:
if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
goto retry_multishot;
- if (mshot_finished) {
- /* fast path, check for non-NULL to avoid function call */
- if (kmsg->free_iov)
- kfree(kmsg->free_iov);
- io_netmsg_recycle(req, issue_flags);
- req->flags &= ~REQ_F_NEED_CLEANUP;
- } else if (ret == -EAGAIN)
+ if (mshot_finished)
+ io_req_msg_cleanup(req, kmsg, issue_flags);
+ else if (ret == -EAGAIN)
return io_setup_async_msg(req, kmsg, issue_flags);
return ret;
@@ -940,6 +951,10 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
msg.msg_iocb = NULL;
msg.msg_ubuf = NULL;
+ flags = sr->msg_flags;
+ if (force_nonblock)
+ flags |= MSG_DONTWAIT;
+
retry_multishot:
if (io_do_buffer_select(req)) {
void __user *buf;
@@ -958,9 +973,6 @@ retry_multishot:
msg.msg_inq = -1;
msg.msg_flags = 0;
- flags = sr->msg_flags;
- if (force_nonblock)
- flags |= MSG_DONTWAIT;
if (flags & MSG_WAITALL)
min_ret = iov_iter_count(&msg.msg_iter);
@@ -978,7 +990,7 @@ retry_multishot:
sr->len -= ret;
sr->buf += ret;
sr->done_io += ret;
- req->flags |= REQ_F_PARTIAL_IO;
+ req->flags |= REQ_F_BL_NO_RECYCLE;
return -EAGAIN;
}
if (ret == -ERESTARTSYS)
@@ -1029,7 +1041,6 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
struct io_kiocb *notif;
zc->done_io = 0;
- req->flags |= REQ_F_POLL_NO_LAZY;
if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
return -EINVAL;
@@ -1222,7 +1233,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
zc->len -= ret;
zc->buf += ret;
zc->done_io += ret;
- req->flags |= REQ_F_PARTIAL_IO;
+ req->flags |= REQ_F_BL_NO_RECYCLE;
return io_setup_async_addr(req, &__address, issue_flags);
}
if (ret == -ERESTARTSYS)
@@ -1293,7 +1304,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
if (ret > 0 && io_net_retry(sock, flags)) {
sr->done_io += ret;
- req->flags |= REQ_F_PARTIAL_IO;
+ req->flags |= REQ_F_BL_NO_RECYCLE;
return io_setup_async_msg(req, kmsg, issue_flags);
}
if (ret == -ERESTARTSYS)