Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--    fs/io_uring.c | 81
1 file changed, 57 insertions, 24 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index b07196b4511c..d5ab0e9a3f29 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1278,6 +1278,7 @@ static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
 
 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
                           bool cancel_all)
+        __must_hold(&req->ctx->timeout_lock)
 {
         struct io_kiocb *req;
 
@@ -1293,6 +1294,44 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
         return false;
 }
 
+static bool io_match_linked(struct io_kiocb *head)
+{
+        struct io_kiocb *req;
+
+        io_for_each_link(req, head) {
+                if (req->flags & REQ_F_INFLIGHT)
+                        return true;
+        }
+        return false;
+}
+
+/*
+ * As io_match_task() but protected against racing with linked timeouts.
+ * User must not hold timeout_lock.
+ */
+static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+                               bool cancel_all)
+{
+        bool matched;
+
+        if (task && head->task != task)
+                return false;
+        if (cancel_all)
+                return true;
+
+        if (head->flags & REQ_F_LINK_TIMEOUT) {
+                struct io_ring_ctx *ctx = head->ctx;
+
+                /* protect against races with linked timeouts */
+                spin_lock_irq(&ctx->timeout_lock);
+                matched = io_match_linked(head);
+                spin_unlock_irq(&ctx->timeout_lock);
+        } else {
+                matched = io_match_linked(head);
+        }
+        return matched;
+}
+
 static inline bool req_has_async_data(struct io_kiocb *req)
 {
         return req->flags & REQ_F_ASYNC_DATA;
@@ -1502,10 +1541,10 @@ static void io_prep_async_link(struct io_kiocb *req)
         if (req->flags & REQ_F_LINK_TIMEOUT) {
                 struct io_ring_ctx *ctx = req->ctx;
 
-                spin_lock(&ctx->completion_lock);
+                spin_lock_irq(&ctx->timeout_lock);
                 io_for_each_link(cur, req)
                         io_prep_async_work(cur);
-                spin_unlock(&ctx->completion_lock);
+                spin_unlock_irq(&ctx->timeout_lock);
         } else {
                 io_for_each_link(cur, req)
                         io_prep_async_work(cur);
@@ -4327,6 +4366,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
                 kfree(nxt);
                 if (++i == nbufs)
                         return i;
+                cond_resched();
         }
         i++;
         kfree(buf);
@@ -5704,7 +5744,7 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
 
                 list = &ctx->cancel_hash[i];
                 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
-                        if (io_match_task(req, tsk, cancel_all))
+                        if (io_match_task_safe(req, tsk, cancel_all))
                                 posted += io_poll_remove_one(req);
                 }
         }
@@ -6156,6 +6196,9 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
         if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
                 return -EFAULT;
 
+        if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
+                return -EINVAL;
+
         data->mode = io_translate_timeout_mode(flags);
         hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
 
@@ -6880,10 +6923,11 @@ static inline struct file *io_file_get(struct io_ring_ctx *ctx,
 static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 {
         struct io_kiocb *prev = req->timeout.prev;
-        int ret;
+        int ret = -ENOENT;
 
         if (prev) {
-                ret = io_try_cancel_userdata(req, prev->user_data);
+                if (!(req->task->flags & PF_EXITING))
+                        ret = io_try_cancel_userdata(req, prev->user_data);
                 io_req_complete_post(req, ret ?: -ETIME, 0);
                 io_put_req(prev);
         } else {
@@ -9255,10 +9299,8 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
         struct io_buffer *buf;
         unsigned long index;
 
-        xa_for_each(&ctx->io_buffers, index, buf) {
+        xa_for_each(&ctx->io_buffers, index, buf)
                 __io_remove_buffers(ctx, buf, index, -1U);
-                cond_resched();
-        }
 }
 
 static void io_req_caches_free(struct io_ring_ctx *ctx)
@@ -9562,19 +9604,8 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
 {
         struct io_kiocb *req = container_of(work, struct io_kiocb, work);
         struct io_task_cancel *cancel = data;
-        bool ret;
-
-        if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
-                struct io_ring_ctx *ctx = req->ctx;
-
-                /* protect against races with linked timeouts */
-                spin_lock(&ctx->completion_lock);
-                ret = io_match_task(req, cancel->task, cancel->all);
-                spin_unlock(&ctx->completion_lock);
-        } else {
-                ret = io_match_task(req, cancel->task, cancel->all);
-        }
-        return ret;
+
+        return io_match_task_safe(req, cancel->task, cancel->all);
 }
 
 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
@@ -9586,7 +9617,7 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
 
         spin_lock(&ctx->completion_lock);
         list_for_each_entry_reverse(de, &ctx->defer_list, list) {
-                if (io_match_task(de->req, task, cancel_all)) {
+                if (io_match_task_safe(de->req, task, cancel_all)) {
                         list_cut_position(&list, &ctx->defer_list, &de->list);
                         break;
                 }
@@ -9764,7 +9795,7 @@ static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
         }
         if (wq) {
                 /*
-                 * Must be after io_uring_del_task_file() (removes nodes under
+                 * Must be after io_uring_del_tctx_node() (removes nodes under
                  * uring_lock) to avoid race with io_uring_try_cancel_iowq().
                  */
                 io_wq_put_and_exit(wq);
@@ -9793,7 +9824,7 @@ static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
 
 /*
  * Find any io_uring ctx that this task has registered or done IO on, and cancel
- * requests. @sqd should be not-null IIF it's an SQPOLL thread cancellation.
+ * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
  */
 static __cold void io_uring_cancel_generic(bool cancel_all,
                                            struct io_sq_data *sqd)
@@ -9835,8 +9866,10 @@ static __cold void io_uring_cancel_generic(bool cancel_all,
                                                      cancel_all);
                 }
 
-                prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
+                prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
+                io_run_task_work();
                 io_uring_drop_tctx_refs(current);
+
                 /*
                  * If we've seen completions, retry without waiting. This
                  * avoids a race where a completion comes in before we did
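
For context, here is a minimal userspace sketch (not kernel code) of the locking pattern the new io_match_task_safe() above introduces: the link chain is walked lock-free in the common case, and only under timeout_lock when the head request carries a linked timeout that could rewrite the chain concurrently. All names and types in the sketch (fake_req, match_task_safe, the pthread mutex standing in for the ctx spinlock) are illustrative assumptions, not io_uring's actual API.

/*
 * Userspace illustration of the io_match_task_safe() pattern: only take the
 * lock that a linked timeout handler would also take when such a timeout is
 * actually attached to the head request.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define REQ_F_INFLIGHT          (1u << 0)
#define REQ_F_LINK_TIMEOUT      (1u << 1)

struct fake_req {
        unsigned flags;
        struct fake_req *link;  /* next request in the link chain */
};

static pthread_mutex_t timeout_lock = PTHREAD_MUTEX_INITIALIZER;

/* Walk the chain looking for an in-flight request (caller handles locking). */
static bool match_linked(struct fake_req *head)
{
        for (struct fake_req *req = head; req; req = req->link)
                if (req->flags & REQ_F_INFLIGHT)
                        return true;
        return false;
}

static bool match_task_safe(struct fake_req *head)
{
        bool matched;

        if (head->flags & REQ_F_LINK_TIMEOUT) {
                /* a linked timeout may rewrite the chain; serialize the walk */
                pthread_mutex_lock(&timeout_lock);
                matched = match_linked(head);
                pthread_mutex_unlock(&timeout_lock);
        } else {
                matched = match_linked(head);
        }
        return matched;
}

int main(void)
{
        struct fake_req tail = { .flags = REQ_F_INFLIGHT, .link = NULL };
        struct fake_req head = { .flags = REQ_F_LINK_TIMEOUT, .link = &tail };

        printf("matched: %d\n", match_task_safe(&head));
        return 0;
}

The asymmetry mirrors the patch: taking the lock unconditionally would serialize every cancellation check, so it is taken only when REQ_F_LINK_TIMEOUT signals a possible race with the timeout path.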