author    Jens Axboe <axboe@kernel.dk>    2021-02-26 13:48:19 -0700
committer Jens Axboe <axboe@kernel.dk>    2021-03-04 06:34:39 -0700
commit    afcc4015d1bf5659b8c722aff679e9b8c41ee156
tree      107b0e65a3d2e5218cd1967b71f3e07d38856839 /fs/io-wq.c
parent    8629397e6e2753bb4cc62ba48a12e1d4d912b6a4
io-wq: provide an io_wq_put_and_exit() helper
If we put the io-wq from io_uring, we really want it to exit. Provide a helper that does that for us. Couple that with not having the manager hold a reference to the 'wq', and the normal SQPOLL exit will tear down the io-wq context appropriately.

On the io-wq side, our wq context is per task, so only the task itself is manipulating ->manager and hence it's safe to check and clear without any extra locking. We just need to ensure that the manager task stays around, in case it exits.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
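For context, the consumer of the new helper lives on the io_uring side and is not part of this diff. A minimal, hypothetical sketch of how a teardown path would switch from a plain reference drop to the new helper follows; the function name io_uring_clean_wq() is invented for illustration, and the declarations come from the fs/io-wq.h header:

#include "io-wq.h"

/*
 * Hypothetical caller-side teardown, not part of this patch (the real
 * caller is in fs/io_uring.c). io_wq_put() alone only drops a reference;
 * io_wq_put_and_exit() additionally sets IO_WQ_BIT_EXIT and reaps the
 * manager, so the workqueue is actually shut down.
 */
static void io_uring_clean_wq(struct io_wq *wq)
{
	if (wq)
		io_wq_put_and_exit(wq);
}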
Diffstat (limited to 'fs/io-wq.c')
-rw-r--r--    fs/io-wq.c    29
1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index f0b7e9ff63fa..1407ba74ffc3 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -749,7 +749,7 @@ static int io_wq_manager(void *data)
sprintf(buf, "iou-mgr-%d", wq->task_pid);
set_task_comm(current, buf);
current->flags |= PF_IO_WORKER;
- wq->manager = current;
+ wq->manager = get_task_struct(current);
complete(&wq->started);
@@ -771,9 +771,7 @@ static int io_wq_manager(void *data)
/* we might not ever have created any workers */
if (atomic_read(&wq->worker_refs))
wait_for_completion(&wq->worker_done);
- wq->manager = NULL;
complete(&wq->exited);
- io_wq_put(wq);
do_exit(0);
}
@@ -816,8 +814,6 @@ static int io_wq_fork_manager(struct io_wq *wq)
return 0;
reinit_completion(&wq->worker_done);
- clear_bit(IO_WQ_BIT_EXIT, &wq->state);
- refcount_inc(&wq->refs);
current->flags |= PF_IO_WORKER;
ret = io_wq_fork_thread(io_wq_manager, wq);
current->flags &= ~PF_IO_WORKER;
@@ -1089,6 +1085,16 @@ err_wq:
return ERR_PTR(ret);
}
+static void io_wq_destroy_manager(struct io_wq *wq)
+{
+ if (wq->manager) {
+ wake_up_process(wq->manager);
+ wait_for_completion(&wq->exited);
+ put_task_struct(wq->manager);
+ wq->manager = NULL;
+ }
+}
+
static void io_wq_destroy(struct io_wq *wq)
{
int node;
@@ -1096,10 +1102,7 @@ static void io_wq_destroy(struct io_wq *wq)
cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
set_bit(IO_WQ_BIT_EXIT, &wq->state);
- if (wq->manager) {
- wake_up_process(wq->manager);
- wait_for_completion(&wq->exited);
- }
+ io_wq_destroy_manager(wq);
spin_lock_irq(&wq->hash->wait.lock);
for_each_node(node) {
@@ -1112,7 +1115,6 @@ static void io_wq_destroy(struct io_wq *wq)
io_wq_put_hash(wq->hash);
kfree(wq->wqes);
kfree(wq);
-
}
void io_wq_put(struct io_wq *wq)
@@ -1121,6 +1123,13 @@ void io_wq_put(struct io_wq *wq)
io_wq_destroy(wq);
}
+void io_wq_put_and_exit(struct io_wq *wq)
+{
+ set_bit(IO_WQ_BIT_EXIT, &wq->state);
+ io_wq_destroy_manager(wq);
+ io_wq_put(wq);
+}
+
static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
struct task_struct *task = worker->task;
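One design note on the hunks above: the manager's task_struct is now pinned with get_task_struct() when it is recorded, so the destroy path can always wake_up_process() it and wait on ->exited, even if the thread is already on its way to do_exit(). A generic sketch of that pin-wake-wait idiom, with made-up names (worker_ctx, worker_ctx_stop) rather than code taken from this patch:

#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/sched/task.h>	/* get_task_struct()/put_task_struct() */

/* Made-up context for illustration: the creator records a pinned task
 * pointer (in this patch, the thread does get_task_struct(current) on
 * itself and stores it in wq->manager). */
struct worker_ctx {
	struct task_struct *thread;	/* pinned reference to the worker thread */
	struct completion exited;	/* completed by the thread just before it exits */
};

static void worker_ctx_stop(struct worker_ctx *ctx)
{
	if (!ctx->thread)
		return;
	wake_up_process(ctx->thread);		/* kick it out of any sleep */
	wait_for_completion(&ctx->exited);	/* wait until it has signalled exit */
	put_task_struct(ctx->thread);		/* drop the pinned reference */
	ctx->thread = NULL;
}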