From e54def4ad8144ab15f826416e2e0f290ef1901b4 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 19 Jun 2024 23:00:30 +0200
Subject: Adding upstream version 6.9.2.

Signed-off-by: Daniel Baumann
---
 io_uring/io_uring.h | 79 +++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 70 insertions(+), 9 deletions(-)

(limited to 'io_uring/io_uring.h')

diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index c0cff024db..6426ee3822 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -5,6 +5,7 @@
 #include <linux/lockdep.h>
 #include <linux/resume_user_mode.h>
 #include <linux/kasan.h>
+#include <linux/poll.h>
 #include <linux/io_uring_types.h>
 #include <uapi/linux/eventpoll.h>
 #include "io-wq.h"
@@ -34,6 +35,32 @@ enum {
 	IOU_STOP_MULTISHOT	= -ECANCELED,
 };
 
+struct io_wait_queue {
+	struct wait_queue_entry wq;
+	struct io_ring_ctx *ctx;
+	unsigned cq_tail;
+	unsigned nr_timeouts;
+	ktime_t timeout;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned int napi_busy_poll_to;
+	bool napi_prefer_busy_poll;
+#endif
+};
+
+static inline bool io_should_wake(struct io_wait_queue *iowq)
+{
+	struct io_ring_ctx *ctx = iowq->ctx;
+	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
+
+	/*
+	 * Wake up if we have enough events, or if a timeout occurred since we
+	 * started waiting. For timeouts, we always want to return to userspace,
+	 * regardless of event count.
+	 */
+	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
+}
+
 bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
 void io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
@@ -56,6 +83,8 @@ void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
 void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
 void io_req_task_queue_fail(struct io_kiocb *req, int ret);
 void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
+struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
+struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count);
 void tctx_task_work(struct callback_head *cb);
 __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
 int io_uring_alloc_task_context(struct task_struct *task,
@@ -207,7 +236,7 @@ static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
 					 unsigned issue_flags)
 {
 	lockdep_assert_held(&ctx->uring_lock);
-	if (issue_flags & IO_URING_F_UNLOCKED)
+	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
 		mutex_unlock(&ctx->uring_lock);
 }
 
@@ -220,7 +249,7 @@ static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
 	 * The only exception is when we've detached the request and issue it
 	 * from an async worker thread, grab the lock for that case.
 	 */
-	if (issue_flags & IO_URING_F_UNLOCKED)
+	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
 		mutex_lock(&ctx->uring_lock);
 	lockdep_assert_held(&ctx->uring_lock);
 }
@@ -274,6 +303,8 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
 
 static inline int io_run_task_work(void)
 {
+	bool ret = false;
+
 	/*
 	 * Always check-and-clear the task_work notification signal. With how
 	 * signaling works for task_work, we can find it set with nothing to
@@ -285,23 +316,31 @@ static inline int io_run_task_work(void)
 	 * PF_IO_WORKER never returns to userspace, so check here if we have
 	 * notify work that needs processing.
 	 */
-	if (current->flags & PF_IO_WORKER &&
-	    test_thread_flag(TIF_NOTIFY_RESUME)) {
-		__set_current_state(TASK_RUNNING);
-		resume_user_mode_work(NULL);
+	if (current->flags & PF_IO_WORKER) {
+		if (test_thread_flag(TIF_NOTIFY_RESUME)) {
+			__set_current_state(TASK_RUNNING);
+			resume_user_mode_work(NULL);
+		}
+		if (current->io_uring) {
+			unsigned int count = 0;
+
+			tctx_task_work_run(current->io_uring, UINT_MAX, &count);
+			if (count)
+				ret = true;
+		}
 	}
 	if (task_work_pending(current)) {
 		__set_current_state(TASK_RUNNING);
 		task_work_run();
-		return 1;
+		ret = true;
 	}
 
-	return 0;
+	return ret;
 }
 
 static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
 {
-	return task_work_pending(current) || !llist_empty(&ctx->work_llist);
+	return task_work_pending(current) || !wq_list_empty(&ctx->work_llist);
 }
 
 static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
@@ -398,4 +437,26 @@ static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
 		return 2 * sizeof(struct io_uring_sqe);
 	return sizeof(struct io_uring_sqe);
 }
+
+static inline bool io_file_can_poll(struct io_kiocb *req)
+{
+	if (req->flags & REQ_F_CAN_POLL)
+		return true;
+	if (file_can_poll(req->file)) {
+		req->flags |= REQ_F_CAN_POLL;
+		return true;
+	}
+	return false;
+}
+
+enum {
+	IO_CHECK_CQ_OVERFLOW_BIT,
+	IO_CHECK_CQ_DROPPED_BIT,
+};
+
+static inline bool io_has_work(struct io_ring_ctx *ctx)
+{
+	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
+	       !llist_empty(&ctx->work_llist);
+}
 #endif
--
cgit v1.2.3
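
Note (outside the patch proper): the io_should_wake() hunk above compares the live CQ ring tail against a snapshot taken when the waiter armed itself, using a signed difference so the check stays correct when the unsigned tail wraps. The standalone userspace sketch below illustrates only that wrap-safe comparison; enough_cqes() is a hypothetical helper invented here for illustration and is not part of the kernel sources.

	/* wrap_check.c - minimal sketch of the signed-distance tail check */
	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * wanted_tail is the tail value at which the waiter has seen enough
	 * completions; current_tail is the live ring tail. The unsigned
	 * subtraction followed by the signed comparison tolerates wraparound,
	 * mirroring the "dist >= 0" test in io_should_wake().
	 */
	static bool enough_cqes(unsigned int current_tail, unsigned int wanted_tail)
	{
		int dist = (int)(current_tail - wanted_tail);

		return dist >= 0;
	}

	int main(void)
	{
		/* tail wrapped past the snapshot near UINT_MAX: still "enough" */
		printf("%d\n", enough_cqes(2u, 0xfffffffeu));	/* prints 1 */
		/* tail well short of the snapshot: keep waiting */
		printf("%d\n", enough_cqes(0xfffffffdu, 2u));	/* prints 0 */
		return 0;
	}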