author    Jens Axboe <axboe@kernel.dk>    2022-01-18 19:11:11 -0700
committer Jens Axboe <axboe@kernel.dk>    2022-01-18 19:28:43 -0700
commit    ccbf726171b7328f800bc98005132fd77eb1a175 (patch)
tree      2d6cdfdb218aea19bd2896b8f69bad1103b83b74 /fs/io_uring.c
parent    361aee450c6e36c8dbab712c94a8a7835bd92e25 (diff)
io_uring: perform poll removal even if async work removal is successful
An active work can have poll armed, hence it's not enough to just do the
async work removal and return the value if it's different from "not found".
Rather than make poll removal special, just fall through to do the remaining
type lookups and removals.

Reported-by: Florian Fischer <florian.fl.fischer@fau.de>
Link: https://lore.kernel.org/io-uring/20220118151337.fac6cthvbnu7icoc@pasture/
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c | 15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 422d6de48688..e54c4127422e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6386,16 +6386,21 @@ static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
 	WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
 
 	ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
-	if (ret != -ENOENT)
-		return ret;
+	/*
+	 * Fall-through even for -EALREADY, as we may have poll armed
+	 * that need unarming.
+	 */
+	if (!ret)
+		return 0;
 
 	spin_lock(&ctx->completion_lock);
+	ret = io_poll_cancel(ctx, sqe_addr, false);
+	if (ret != -ENOENT)
+		goto out;
+
 	spin_lock_irq(&ctx->timeout_lock);
 	ret = io_timeout_cancel(ctx, sqe_addr);
 	spin_unlock_irq(&ctx->timeout_lock);
-	if (ret != -ENOENT)
-		goto out;
-	ret = io_poll_cancel(ctx, sqe_addr, false);
 out:
 	spin_unlock(&ctx->completion_lock);
 	return ret;
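
For readers who want to see the resulting cancellation order in isolation, the sketch below is a minimal, self-contained C model of the fall-through cascade this patch produces in io_try_cancel_userdata(). The fake_*_cancel() helpers are hypothetical stand-ins for io_async_cancel_one(), io_poll_cancel() and io_timeout_cancel() (no locking, no io_uring types); only the lookup ordering and the fall-through on -EALREADY mirror the actual kernel code.

/* Standalone illustration, not kernel code: models the post-patch cascade. */
#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins: each "finds" its request type only for one key. */
static int fake_async_cancel(int key)   { return key == 1 ? 0 : -EALREADY; }
static int fake_poll_cancel(int key)    { return key == 2 ? 0 : -ENOENT; }
static int fake_timeout_cancel(int key) { return key == 3 ? 0 : -ENOENT; }

static int try_cancel(int key)
{
	int ret = fake_async_cancel(key);

	/* Only a clean async-work cancel returns early; -EALREADY falls
	 * through, since the request may still have poll armed. */
	if (!ret)
		return 0;

	/* Poll removal is attempted first now, before the timeout lookup. */
	ret = fake_poll_cancel(key);
	if (ret != -ENOENT)
		return ret;

	return fake_timeout_cancel(key);
}

int main(void)
{
	for (int key = 1; key <= 4; key++)
		printf("key %d -> %d\n", key, try_cancel(key));
	return 0;
}

In this toy model, key 2 shows the case the patch addresses: the async-work lookup reports -EALREADY, but the function no longer returns at that point, so the poll lookup still runs and removes the armed poll.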