author	Hao Xu <haoxu@linux.alibaba.com>	2021-09-22 18:12:38 +0800
committer	Jens Axboe <axboe@kernel.dk>	2021-09-24 10:24:34 -0600
commit	5b7aa38d86f348847a48f71e9ac7715406de900e (patch)
tree	f564a01423b873890b12f8c170858e7f2020334d
parent	a62682f92eedb41c1cd8290fa875a4b85624fb9a (diff)
io_uring: fix potential req refcount underflow
For multishot mode, there may be cases like:

iowq                                  original context
io_poll_add
  _arm_poll()
  mask = vfs_poll() is not 0
  if mask
      (2) io_poll_complete()
  compl_unlock
                                      (interruption happens,
                                       tw queued to original context)
                                      io_poll_task_func()
                                      compl_lock
                                      (3) done = io_poll_complete() is true
                                      compl_unlock
                                      put req ref
  (1) if (poll->flags & EPOLLONESHOT)
          put req ref

The EPOLLONESHOT flag seen in (1) may come from (2) or (3), so there are
multiple combinations that can cause a ref underflow. Let's address it by:

 - checking the return value in (2) as done
 - changing (1) to if (done); this way, we only put the ref in (1) if the
   'oneshot flag' came from (2)
 - checking poll.done in io_poll_task_func(), so that we won't put the ref
   a second time

Signed-off-by: Hao Xu <haoxu@linux.alibaba.com>
Link: https://lore.kernel.org/r/20210922101238.7177-4-haoxu@linux.alibaba.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
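To see why the double put matters, here is a minimal userspace sketch (plain C with <stdatomic.h>, not io_uring code; fake_req and req_put are made-up names) of the pattern the message describes: two paths each believe they own the completion reference and both drop it, driving the count below zero.

/*
 * Illustrative only: models the refcount underflow, not the kernel's
 * actual request lifetime handling.
 */
#include <stdatomic.h>
#include <stdio.h>

struct fake_req {
	atomic_int refs;
};

static void req_put(struct fake_req *r)
{
	int old = atomic_fetch_sub(&r->refs, 1);
	if (old <= 0)
		printf("refcount underflow: %d -> %d\n", old, old - 1);
}

int main(void)
{
	struct fake_req r = { .refs = 1 };	/* one ref reserved for completion */

	/* path (2)/(3): the task-work completion already dropped the ref ... */
	req_put(&r);
	/* ... but io_poll_add() still sees EPOLLONESHOT and drops it again */
	req_put(&r);

	printf("final refs = %d\n", atomic_load(&r.refs));
	return 0;
}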
-rw-r--r--	fs/io_uring.c	9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index ac0c06d5c629..7707cdb7b372 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5367,6 +5367,10 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 	} else {
 		bool done;
 
+		if (req->poll.done) {
+			spin_unlock(&ctx->completion_lock);
+			return;
+		}
 		done = __io_poll_complete(req, req->result);
 		if (done) {
 			io_poll_remove_double(req);
@@ -5830,6 +5834,7 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_poll_table ipt;
 	__poll_t mask;
+	bool done;
 
 	ipt.pt._qproc = io_poll_queue_proc;
 
@@ -5838,13 +5843,13 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (mask) { /* no async, we'd stolen it */
 		ipt.error = 0;
-		io_poll_complete(req, mask);
+		done = io_poll_complete(req, mask);
 	}
 	spin_unlock(&ctx->completion_lock);
 
 	if (mask) {
 		io_cqring_ev_posted(ctx);
-		if (poll->events & EPOLLONESHOT)
+		if (done)
 			io_put_req(req);
 	}
 	return ipt.error;
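For illustration, a minimal userspace model of the ownership rule the fix establishes (hypothetical names, a pthread mutex standing in for completion_lock, not the real io_uring code): the completion reports "done" to exactly one caller under the lock, and only that caller puts the reference, so concurrent completion paths can never both drop it.

/* Sketch under stated assumptions; build with: cc -pthread model.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_req {
	pthread_mutex_t lock;
	bool done;		/* mirrors req->poll.done */
	atomic_int refs;
};

/* Returns true only for the caller that actually completes the request,
 * mirroring the fixed "done = io_poll_complete()" contract. */
static bool fake_complete(struct fake_req *r)
{
	pthread_mutex_lock(&r->lock);
	bool first = !r->done;
	r->done = true;
	pthread_mutex_unlock(&r->lock);
	return first;
}

static void *completion_path(void *arg)
{
	struct fake_req *r = arg;

	/* Only the path that observed the completion drops the reference. */
	if (fake_complete(r))
		atomic_fetch_sub(&r->refs, 1);
	return NULL;
}

int main(void)
{
	struct fake_req r = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done = false,
		.refs = 1,	/* one ref reserved for completion */
	};
	pthread_t a, b;

	pthread_create(&a, NULL, completion_path, &r);
	pthread_create(&b, NULL, completion_path, &r);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	printf("final refs = %d\n", atomic_load(&r.refs));
	return 0;
}

Run repeatedly, the final count is always 0 no matter which path wins the race, which is the invariant the done-gated put restores in io_poll_add()/io_poll_task_func().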