author    | Jens Axboe <axboe@kernel.dk> | 2022-01-18 19:18:20 -0700
committer | Jens Axboe <axboe@kernel.dk> | 2022-01-18 19:27:59 -0700
commit    | 36e4c58bf044b07204c8c7e6dd7c2384e439921a (patch)
tree      | b1a748ee1e548329d65e213c2356ce6e00059b45 /fs/io-wq.c
parent    | 081b58204629eff9dd93e7f68ed15c8aa6452a4b (diff)
io-wq: invoke work cancelation with wqe->lock held
io_wqe_cancel_pending_work() grabs wqe->lock internally; have the caller
grab it upfront instead. For the running work cancelation, grab the lock
around it as well.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io-wq.c')
-rw-r--r-- | fs/io-wq.c | 11
1 file changed, 8 insertions, 3 deletions
diff --git a/fs/io-wq.c b/fs/io-wq.c
index c369910de793..a92fbdc8bea3 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -1038,17 +1038,16 @@ static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
 {
 	int i;
 retry:
-	raw_spin_lock(&wqe->lock);
 	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
 		struct io_wqe_acct *acct = io_get_acct(wqe, i == 0);
 
 		if (io_acct_cancel_pending_work(wqe, acct, match)) {
+			raw_spin_lock(&wqe->lock);
 			if (match->cancel_all)
 				goto retry;
-			return;
+			break;
 		}
 	}
-	raw_spin_unlock(&wqe->lock);
 }
 
 static void io_wqe_cancel_running_work(struct io_wqe *wqe,
@@ -1077,7 +1076,9 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
 	for_each_node(node) {
 		struct io_wqe *wqe = wq->wqes[node];
 
+		raw_spin_lock(&wqe->lock);
 		io_wqe_cancel_pending_work(wqe, &match);
+		raw_spin_unlock(&wqe->lock);
 		if (match.nr_pending && !match.cancel_all)
 			return IO_WQ_CANCEL_OK;
 	}
@@ -1091,7 +1092,9 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
 	for_each_node(node) {
 		struct io_wqe *wqe = wq->wqes[node];
 
+		raw_spin_lock(&wqe->lock);
 		io_wqe_cancel_running_work(wqe, &match);
+		raw_spin_unlock(&wqe->lock);
 		if (match.nr_running && !match.cancel_all)
 			return IO_WQ_CANCEL_RUNNING;
 	}
@@ -1262,7 +1265,9 @@ static void io_wq_destroy(struct io_wq *wq)
 			.fn	= io_wq_work_match_all,
 			.cancel_all = true,
 		};
+		raw_spin_lock(&wqe->lock);
 		io_wqe_cancel_pending_work(wqe, &match);
+		raw_spin_unlock(&wqe->lock);
 		free_cpumask_var(wqe->cpu_mask);
 		kfree(wqe);
 	}
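To make the resulting locking contract explicit, here is a minimal userspace sketch of the pattern the patch establishes: the caller takes wqe->lock around io_wqe_cancel_pending_work(), and the lock is re-acquired inside the loop after a successful cancel, presumably because io_acct_cancel_pending_work() drops it around the actual cancelation. Everything below is a simplified stand-in rather than the kernel code: a pthread mutex replaces the raw spinlock, the per-acct loop is collapsed into a single nr_pending counter, and acct_cancel_pending_work() / wqe_cancel_pending_work() are hypothetical model functions that only mirror the control flow of the hunks above.

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct io_wqe: one lock, one pending counter. */
struct wqe {
	pthread_mutex_t lock;
	int nr_pending;
};

/* Stand-in for io_acct_cancel_pending_work(): called with wqe->lock held,
 * drops it to run the cancelation, and returns true if work was canceled. */
static bool acct_cancel_pending_work(struct wqe *wqe)
{
	if (!wqe->nr_pending)
		return false;
	wqe->nr_pending--;
	pthread_mutex_unlock(&wqe->lock);	/* cancelation runs unlocked */
	printf("canceled one pending work item\n");
	return true;
}

/* Stand-in for io_wqe_cancel_pending_work(): entered and exited with
 * wqe->lock held, mirroring the post-patch control flow above. */
static void wqe_cancel_pending_work(struct wqe *wqe, bool cancel_all)
{
retry:
	if (acct_cancel_pending_work(wqe)) {
		pthread_mutex_lock(&wqe->lock);	/* helper dropped the lock */
		if (cancel_all)
			goto retry;
	}
}

int main(void)
{
	struct wqe wqe = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.nr_pending = 3,
	};

	/* Caller-side pattern added by the patch: lock around the call. */
	pthread_mutex_lock(&wqe.lock);
	wqe_cancel_pending_work(&wqe, true);
	pthread_mutex_unlock(&wqe.lock);
	return 0;
}
```

The invariant worth noting is that io_wqe_cancel_pending_work() is now both entered and exited with wqe->lock held, even though the lock is dropped around each individual cancelation.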