author	Jens Axboe <axboe@kernel.dk>	2023-06-28 11:06:05 -0600
committer	Jens Axboe <axboe@kernel.dk>	2023-06-28 11:06:05 -0600
commit	dfbe5561ae9339516a3742a3fbd678609ad59fd0
tree	b8b9c27149c8a424da8220b081a2e5d5a70d9fe9 /io_uring
parent	10e1c0d59006c6492d380602aa0a6c4eb9441426
io_uring: flush offloaded and delayed task_work on exit
io_uring offloads task_work for cancelation purposes when the task is exiting. This is conceptually fine, but we should be nicer and actually wait for that work to complete before returning.

Add an argument to io_fallback_tw() telling it to flush the deferred work when it's all queued up, and have it flush a ctx behind whenever the ctx changes.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
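The core of the change is the per-ctx batching inside io_fallback_tw(): in sync mode, the loop flushes a ctx's fallback work only when the request list crosses over to a different ctx, and once more after the loop for the trailing batch. Because offloaded requests for the same ring tend to arrive in runs, this costs one flush per ctx rather than one per request, while still guaranteeing every ctx is flushed before return. Below is a minimal userspace C sketch of that pattern, under stated assumptions: struct ctx, struct req, and flush_ctx_work() are illustrative stand-ins for the kernel types, and the percpu_ref get/put that keeps each ctx alive across the flush in the real code is omitted for brevity.

/*
 * Userspace sketch of the boundary-flush pattern: queue each request,
 * and flush the previous ctx's deferred work whenever the ctx changes,
 * plus one final flush for the last ctx after the loop.
 * Names here are illustrative, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

struct ctx {
	const char *name;
};

struct req {
	struct ctx *ctx;
	struct req *next;
};

/* Stand-in for flush_delayed_work(&ctx->fallback_work). */
static void flush_ctx_work(struct ctx *ctx)
{
	printf("flush deferred work for %s\n", ctx->name);
}

static void fallback_tw(struct req *node, bool sync)
{
	struct ctx *last_ctx = NULL;

	while (node) {
		struct req *req = node;

		node = node->next;
		/*
		 * Requests for the same ctx tend to be adjacent, so only
		 * flush when crossing a ctx boundary, not per request.
		 */
		if (sync && last_ctx != req->ctx) {
			if (last_ctx)
				flush_ctx_work(last_ctx);
			last_ctx = req->ctx;
		}
		printf("queue req for %s\n", req->ctx->name);
	}

	/* The trailing batch never hits a boundary; flush it here. */
	if (last_ctx)
		flush_ctx_work(last_ctx);
}

int main(void)
{
	struct ctx a = { "ctx A" }, b = { "ctx B" };
	struct req r3 = { &b, NULL }, r2 = { &b, &r3 }, r1 = { &a, &r2 };

	fallback_tw(&r1, true);
	return 0;
}

Run as written, this queues the request for ctx A, flushes A when the list moves on to ctx B, queues both B requests, and flushes B once at the end.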
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c | 22 +++++++++++++++++++---
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index f84d258ea348..e8096d502a7c 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1237,18 +1237,32 @@ static inline struct llist_node *io_llist_cmpxchg(struct llist_head *head,
	return cmpxchg(&head->first, old, new);
}

-static __cold void io_fallback_tw(struct io_uring_task *tctx)
+static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
{
	struct llist_node *node = llist_del_all(&tctx->task_list);
+	struct io_ring_ctx *last_ctx = NULL;
	struct io_kiocb *req;

	while (node) {
		req = container_of(node, struct io_kiocb, io_task_work.node);
		node = node->next;
+		if (sync && last_ctx != req->ctx) {
+			if (last_ctx) {
+				flush_delayed_work(&last_ctx->fallback_work);
+				percpu_ref_put(&last_ctx->refs);
+			}
+			last_ctx = req->ctx;
+			percpu_ref_get(&last_ctx->refs);
+		}
		if (llist_add(&req->io_task_work.node,
			      &req->ctx->fallback_llist))
			schedule_delayed_work(&req->ctx->fallback_work, 1);
	}
+
+	if (last_ctx) {
+		flush_delayed_work(&last_ctx->fallback_work);
+		percpu_ref_put(&last_ctx->refs);
+	}
}

void tctx_task_work(struct callback_head *cb)
@@ -1263,7 +1277,7 @@ void tctx_task_work(struct callback_head *cb)
	unsigned int count = 0;

	if (unlikely(current->flags & PF_EXITING)) {
-		io_fallback_tw(tctx);
+		io_fallback_tw(tctx, true);
		return;
	}
@@ -1358,7 +1372,7 @@ static void io_req_normal_work_add(struct io_kiocb *req)
	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
		return;

-	io_fallback_tw(tctx);
+	io_fallback_tw(tctx, false);
}

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
@@ -3108,6 +3122,8 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
	if (ctx->rings)
		io_kill_timeouts(ctx, NULL, true);

+	flush_delayed_work(&ctx->fallback_work);
+
	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
	/*
	 * Use system_unbound_wq to avoid spawning tons of event kworkers