author	Pavel Begunkov <asml.silence@gmail.com>	2021-06-17 18:14:06 +0100
committer	Jens Axboe <axboe@kernel.dk>	2021-06-18 09:22:02 -0600
commit	3f18407dc6f2db0968daaa36c39a772c2c9f8ea7 (patch)
tree	fd0dab586ca24786e37e72511312a8b372e8d187 /fs/io_uring.c
parent	a3dbdf54da80326fd12bc11ad75ecd699a82374f (diff)
io_uring: inline __tctx_task_work()
Inline __tctx_task_work() into tctx_task_work() in preparation for further
optimisations.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/f9c05c4bc9763af7bd8e25ebc3c5f7b6f69148f8.1623949695.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
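For context, the loop being inlined below follows a splice-and-drain pattern: hold the lock only long enough to detach the whole pending list, run the detached items with the lock dropped, and batch the per-ctx flushes so consecutive requests for the same ring are flushed once. The following user-space sketch illustrates that pattern only; the types (ctx_t, work_node, task_ctx), the pthread mutex, and the helper names are hypothetical stand-ins for illustration, not the kernel's io_wq_work_list, io_uring_task, or percpu reference counting.

#include <pthread.h>
#include <stdio.h>

struct ctx_t {
	const char *name;
};

struct work_node {
	struct work_node *next;
	struct ctx_t *ctx;
	void (*func)(struct work_node *);
};

struct task_ctx {
	pthread_mutex_t lock;
	struct work_node *head;		/* pending work queued by producers */
};

/* Stand-in for a ctx flush: tolerates NULL, "flushes" the previous ctx. */
static void flush_ctx(struct ctx_t *ctx)
{
	if (ctx)
		printf("flush %s\n", ctx->name);
}

/* Drain everything queued on @tctx, batching per-ctx flushes. */
static void drain_task_work(struct task_ctx *tctx)
{
	while (tctx->head) {
		struct ctx_t *ctx = NULL;
		struct work_node *node;

		/* Splice the whole pending list out under the lock... */
		pthread_mutex_lock(&tctx->lock);
		node = tctx->head;
		tctx->head = NULL;
		pthread_mutex_unlock(&tctx->lock);

		/* ...then run the items without holding the lock. */
		while (node) {
			struct work_node *next = node->next;

			if (node->ctx != ctx) {
				/* Context changed: flush the previous one. */
				flush_ctx(ctx);
				ctx = node->ctx;
			}
			node->func(node);
			node = next;
		}
		flush_ctx(ctx);
	}
}

static void do_work(struct work_node *n)
{
	printf("run work for %s\n", n->ctx->name);
}

int main(void)
{
	struct ctx_t a = { "ctx-a" }, b = { "ctx-b" };
	struct work_node w2 = { NULL, &b, do_work };
	struct work_node w1 = { &w2, &a, do_work };
	struct task_ctx tctx = { PTHREAD_MUTEX_INITIALIZER, &w1 };

	drain_task_work(&tctx);
	return 0;
}

Splicing under the lock keeps the critical section short and lets newly queued work be picked up on the next outer iteration, which is why the kernel loop below re-checks the list and calls cond_resched() between batches.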
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	67
1 file changed, 31 insertions(+), 36 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c41e9a925fa2..dc71850d7a49 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1888,48 +1888,43 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx)
 	percpu_ref_put(&ctx->refs);
 }
 
-static bool __tctx_task_work(struct io_uring_task *tctx)
-{
-	struct io_ring_ctx *ctx = NULL;
-	struct io_wq_work_list list;
-	struct io_wq_work_node *node;
-
-	if (wq_list_empty(&tctx->task_list))
-		return false;
-
-	spin_lock_irq(&tctx->task_lock);
-	list = tctx->task_list;
-	INIT_WQ_LIST(&tctx->task_list);
-	spin_unlock_irq(&tctx->task_lock);
-
-	node = list.first;
-	while (node) {
-		struct io_wq_work_node *next = node->next;
-		struct io_kiocb *req;
-
-		req = container_of(node, struct io_kiocb, io_task_work.node);
-		if (req->ctx != ctx) {
-			ctx_flush_and_put(ctx);
-			ctx = req->ctx;
-			percpu_ref_get(&ctx->refs);
-		}
-
-		req->task_work.func(&req->task_work);
-		node = next;
-	}
-
-	ctx_flush_and_put(ctx);
-	return list.first != NULL;
-}
-
 static void tctx_task_work(struct callback_head *cb)
 {
-	struct io_uring_task *tctx = container_of(cb, struct io_uring_task, task_work);
+	struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
+						  task_work);
 
 	clear_bit(0, &tctx->task_state);
 
-	while (__tctx_task_work(tctx))
+	while (!wq_list_empty(&tctx->task_list)) {
+		struct io_ring_ctx *ctx = NULL;
+		struct io_wq_work_list list;
+		struct io_wq_work_node *node;
+
+		spin_lock_irq(&tctx->task_lock);
+		list = tctx->task_list;
+		INIT_WQ_LIST(&tctx->task_list);
+		spin_unlock_irq(&tctx->task_lock);
+
+		node = list.first;
+		while (node) {
+			struct io_wq_work_node *next = node->next;
+			struct io_kiocb *req = container_of(node, struct io_kiocb,
+							    io_task_work.node);
+
+			if (req->ctx != ctx) {
+				ctx_flush_and_put(ctx);
+				ctx = req->ctx;
+				percpu_ref_get(&ctx->refs);
+			}
+			req->task_work.func(&req->task_work);
+			node = next;
+		}
+
+		ctx_flush_and_put(ctx);
+		if (!list.first)
+			break;
 		cond_resched();
+	}
 }
 
 static int io_req_task_work_add(struct io_kiocb *req)