Diffstat (limited to 'io_uring/kbuf.c')
-rw-r--r-- | io_uring/kbuf.c | 55 |
1 file changed, 23 insertions, 32 deletions
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index b2c4f82937..3aa16e27f5 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -71,15 +71,6 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 	struct io_buffer_list *bl;
 	struct io_buffer *buf;
 
-	/*
-	 * For legacy provided buffer mode, don't recycle if we already did
-	 * IO to this buffer. For ring-mapped provided buffer mode, we should
-	 * increment ring->head to explicitly monopolize the buffer to avoid
-	 * multiple use.
-	 */
-	if (req->flags & REQ_F_PARTIAL_IO)
-		return false;
-
 	io_ring_submit_lock(ctx, issue_flags);
 
 	buf = req->kbuf;
@@ -92,10 +83,8 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 	return true;
 }
 
-unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
+void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
 {
-	unsigned int cflags;
-
 	/*
 	 * We can add this buffer back to two lists:
 	 *
@@ -108,21 +97,17 @@ unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
 	 * We migrate buffers from the comp_list to the issue cache list
 	 * when we need one.
 	 */
-	if (req->flags & REQ_F_BUFFER_RING) {
-		/* no buffers to recycle for this case */
-		cflags = __io_put_kbuf_list(req, NULL);
-	} else if (issue_flags & IO_URING_F_UNLOCKED) {
+	if (issue_flags & IO_URING_F_UNLOCKED) {
 		struct io_ring_ctx *ctx = req->ctx;
 
 		spin_lock(&ctx->completion_lock);
-		cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
+		__io_put_kbuf_list(req, &ctx->io_buffers_comp);
 		spin_unlock(&ctx->completion_lock);
 	} else {
 		lockdep_assert_held(&req->ctx->uring_lock);
 
-		cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
+		__io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
 	}
-	return cflags;
 }
 
 static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
@@ -135,6 +120,8 @@ static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
 		list_del(&kbuf->list);
 		if (*len == 0 || *len > kbuf->len)
 			*len = kbuf->len;
+		if (list_empty(&bl->buf_list))
+			req->flags |= REQ_F_BL_EMPTY;
 		req->flags |= REQ_F_BUFFER_SELECTED;
 		req->kbuf = kbuf;
 		req->buf_index = kbuf->bid;
@@ -148,12 +135,16 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 					  unsigned int issue_flags)
 {
 	struct io_uring_buf_ring *br = bl->buf_ring;
+	__u16 tail, head = bl->head;
 	struct io_uring_buf *buf;
-	__u16 head = bl->head;
 
-	if (unlikely(smp_load_acquire(&br->tail) == head))
+	tail = smp_load_acquire(&br->tail);
+	if (unlikely(tail == head))
 		return NULL;
 
+	if (head + 1 == tail)
+		req->flags |= REQ_F_BL_EMPTY;
+
 	head &= bl->mask;
 	/* mmaped buffers are always contig */
 	if (bl->is_mmap || head < IO_BUFFER_LIST_BUF_PER_PAGE) {
@@ -170,7 +161,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 	req->buf_list = bl;
 	req->buf_index = buf->bid;
 
-	if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) {
+	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
 		/*
 		 * If we came in unlocked, we have no choice but to consume the
 		 * buffer here, otherwise nothing ensures that the buffer won't
@@ -198,7 +189,7 @@ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
 
 	bl = io_buffer_get_list(ctx, req->buf_index);
 	if (likely(bl)) {
-		if (bl->is_mapped)
+		if (bl->is_buf_ring)
 			ret = io_ring_buffer_select(req, len, bl, issue_flags);
 		else
 			ret = io_provided_buffer_select(req, len, bl);
@@ -234,7 +225,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
 	if (!nbufs)
 		return 0;
 
-	if (bl->is_mapped) {
+	if (bl->is_buf_ring) {
 		i = bl->buf_ring->tail - bl->head;
 		if (bl->is_mmap) {
 			/*
@@ -255,7 +246,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
 		}
 		/* make sure it's seen as empty */
 		INIT_LIST_HEAD(&bl->buf_list);
-		bl->is_mapped = 0;
+		bl->is_buf_ring = 0;
 		return i;
 	}
 
@@ -342,7 +333,7 @@ int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
 	if (bl) {
 		ret = -EINVAL;
 		/* can't use provide/remove buffers command on mapped buffers */
-		if (!bl->is_mapped)
+		if (!bl->is_buf_ring)
 			ret = __io_remove_buffers(ctx, bl, p->nbufs);
 	}
 	io_ring_submit_unlock(ctx, issue_flags);
@@ -489,7 +480,7 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 		}
 	}
 	/* can't add buffers via this command for a mapped buffer ring */
-	if (bl->is_mapped) {
+	if (bl->is_buf_ring) {
 		ret = -EINVAL;
 		goto err;
 	}
@@ -545,7 +536,7 @@ static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
 	bl->buf_pages = pages;
 	bl->buf_nr_pages = nr_pages;
 	bl->buf_ring = br;
-	bl->is_mapped = 1;
+	bl->is_buf_ring = 1;
 	bl->is_mmap = 0;
 	return 0;
 error_unpin:
@@ -612,7 +603,7 @@ static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
 	}
 	ibf->inuse = 1;
 	bl->buf_ring = ibf->mem;
-	bl->is_mapped = 1;
+	bl->is_buf_ring = 1;
 	bl->is_mmap = 1;
 	return 0;
 }
@@ -652,7 +643,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 	bl = io_buffer_get_list(ctx, reg.bgid);
 	if (bl) {
 		/* if mapped buffer ring OR classic exists, don't allow */
-		if (bl->is_mapped || !list_empty(&bl->buf_list))
+		if (bl->is_buf_ring || !list_empty(&bl->buf_list))
 			return -EEXIST;
 	} else {
 		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
@@ -694,7 +685,7 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 	bl = io_buffer_get_list(ctx, reg.bgid);
 	if (!bl)
 		return -ENOENT;
-	if (!bl->is_mapped)
+	if (!bl->is_buf_ring)
 		return -EINVAL;
 
 	xa_erase(&ctx->io_bl_xa, bl->bgid);
@@ -718,7 +709,7 @@ int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
 	bl = io_buffer_get_list(ctx, buf_status.buf_group);
 	if (!bl)
 		return -ENOENT;
-	if (!bl->is_mapped)
+	if (!bl->is_buf_ring)
 		return -EINVAL;
 
 	buf_status.head = bl->head;
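
The new last-buffer check in io_ring_buffer_select() relies on head and tail being free-running 16-bit counters, with the slot actually dereferenced being head & bl->mask. Below is a minimal stand-alone sketch of that index arithmetic (plain user-space C; the variable names and printed messages are illustrative only, not kernel API), assuming the ring size is a power of two so masking stays correct across counter wraparound.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative model of the provided-buffer-ring indices used by
 * io_ring_buffer_select(): head (consumer) and tail (producer) are
 * free-running 16-bit counters; the entry dereferenced is head & mask,
 * where mask = ring_entries - 1 and ring_entries is a power of two.
 */
int main(void)
{
	uint16_t head = 65534;	/* consumer index, about to wrap */
	uint16_t tail = 65535;	/* producer index */
	uint16_t mask = 7;	/* ring with 8 entries */

	if (tail == head) {
		/* nothing published: no buffer can be selected */
		printf("ring empty\n");
		return 0;
	}

	/*
	 * Exactly one entry left: the condition under which the kernel
	 * marks the request with REQ_F_BL_EMPTY after picking the buffer.
	 */
	if ((uint16_t)(head + 1) == tail)
		printf("last buffer being consumed\n");

	/* wraparound-safe slot index and count of buffers left after the pick */
	printf("slot = %u, remaining = %u\n",
	       (unsigned)(head & mask),
	       (unsigned)(uint16_t)(tail - head) - 1);
	return 0;
}

Keeping the counters free-running and masking only at dereference is what lets tail == head unambiguously mean "empty" and head + 1 == tail mean "this pick takes the last buffer", which is exactly when the diff above sets REQ_F_BL_EMPTY.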