author     Pavel Begunkov <asml.silence@gmail.com>    2020-10-22 16:43:11 +0100
committer  Jens Axboe <axboe@kernel.dk>               2020-10-23 13:07:11 -0600
commit     c9abd7ad832b9eef06d887f4971894af5de617fd
tree       37500f1fa493564cee31b9a8d0033448b81a1419 /fs
parent     cdfcc3ee04599ce51e5c84432c177163637dd0e0
io_uring: don't defer put of cancelled ltimeout
Inline io_link_cancel_timeout() and __io_kill_linked_timeout() into
io_kill_linked_timeout(). That makes it easy to move the put of a
cancelled linked timeout out of completion_lock and to avoid deferring
it. The code is also much more readable when not scattered across three
different functions.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
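
For context, here is a minimal userspace sketch of the locking shape this
patch adopts: decide and record the cancellation while holding the lock,
then do the completion notification and the final put only after the lock
is dropped. All names in it (struct request, kill_linked(), req_put()) are
hypothetical stand-ins, a pthread mutex replaces ctx->completion_lock, and
free() stands in for the real put; it illustrates the pattern, not the
io_uring code itself.

/*
 * Illustrative sketch (not kernel code) of "record under the lock,
 * complete and put after dropping it".
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	int refs;               /* simplified refcount */
	bool timeout_active;    /* stands in for REQ_F_LTIMEOUT_ACTIVE */
};

static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;

static void req_put(struct request *req)
{
	/* The final put may free the object; doing it inside the critical
	 * section would only lengthen the lock hold time. */
	if (--req->refs == 0)
		free(req);
}

static void kill_linked(struct request *link)
{
	bool cancelled = false;

	pthread_mutex_lock(&completion_lock);
	if (link && link->timeout_active) {
		link->timeout_active = false;   /* "cancel" the timeout */
		cancelled = true;               /* remember work for later */
	}
	pthread_mutex_unlock(&completion_lock);

	/* Post-processing happens after the lock is dropped, mirroring
	 * the patched io_kill_linked_timeout():
	 * if (cancelled) { io_cqring_ev_posted(); io_put_req(); } */
	if (cancelled) {
		printf("completed cancelled timeout\n");
		req_put(link);
	}
}

int main(void)
{
	struct request *link = calloc(1, sizeof(*link));

	link->refs = 1;
	link->timeout_active = true;
	kill_linked(link);
	return 0;
}

Compile with e.g. cc -pthread sketch.c. The point of the shape is simply
that the wakeup and the potentially final reference drop never run inside
the critical section.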
Diffstat (limited to 'fs')
 -rw-r--r--   fs/io_uring.c | 58
 1 file changed, 20 insertions(+), 38 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 552c27850c36..f6cb2b62ce1a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1846,57 +1846,39 @@ static void __io_free_req(struct io_kiocb *req)
 	percpu_ref_put(&ctx->refs);
 }
 
-static bool io_link_cancel_timeout(struct io_kiocb *req)
+static void io_kill_linked_timeout(struct io_kiocb *req)
 {
-	struct io_timeout_data *io = req->async_data;
 	struct io_ring_ctx *ctx = req->ctx;
-	int ret;
-
-	ret = hrtimer_try_to_cancel(&io->timer);
-	if (ret != -1) {
-		io_cqring_fill_event(req, -ECANCELED);
-		io_commit_cqring(ctx);
-		io_put_req_deferred(req, 1);
-		return true;
-	}
-
-	return false;
-}
-
-static bool __io_kill_linked_timeout(struct io_kiocb *req)
-{
 	struct io_kiocb *link;
-	bool wake_ev;
-
-	if (list_empty(&req->link_list))
-		return false;
-	link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
+	bool cancelled = false;
+	unsigned long flags;
 
+	spin_lock_irqsave(&ctx->completion_lock, flags);
+	link = list_first_entry_or_null(&req->link_list, struct io_kiocb,
+					link_list);
 	/*
 	 * Can happen if a linked timeout fired and link had been like
 	 * req -> link t-out -> link t-out [-> ...]
 	 */
-	if (!(link->flags & REQ_F_LTIMEOUT_ACTIVE))
-		return false;
-
-	list_del_init(&link->link_list);
-	wake_ev = io_link_cancel_timeout(link);
-	return wake_ev;
-}
-
-static void io_kill_linked_timeout(struct io_kiocb *req)
-{
-	struct io_ring_ctx *ctx = req->ctx;
-	unsigned long flags;
-	bool wake_ev;
+	if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
+		struct io_timeout_data *io = link->async_data;
+		int ret;
 
-	spin_lock_irqsave(&ctx->completion_lock, flags);
-	wake_ev = __io_kill_linked_timeout(req);
+		list_del_init(&link->link_list);
+		ret = hrtimer_try_to_cancel(&io->timer);
+		if (ret != -1) {
+			io_cqring_fill_event(link, -ECANCELED);
+			io_commit_cqring(ctx);
+			cancelled = true;
+		}
+	}
 	req->flags &= ~REQ_F_LINK_TIMEOUT;
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
-	if (wake_ev)
+	if (cancelled) {
 		io_cqring_ev_posted(ctx);
+		io_put_req(link);
+	}
 }
 
 static struct io_kiocb *io_req_link_next(struct io_kiocb *req)