author	Lai Jiangshan <laijs@linux.alibaba.com>	2021-12-23 20:31:37 +0800
committer	Tejun Heo <tj@kernel.org>	2022-01-12 07:37:28 -1000
commit	21b195c05cf6a6cc49777d6992772bcf01502186 (patch)
tree	75a1bf2a43d192af12ed37fd7cebaccbbedc6a74 /kernel/workqueue.c
parent	daadb3bd0e8d3e317e36bc2c1542e86c528665e5 (diff)
workqueue: Remove the mb() pair between wq_worker_sleeping() and insert_work()
In wq_worker_sleeping(), the access to worklist is protected by the
pool->lock, so the memory barrier is unneeded.
Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	| 11 -----------
1 file changed, 0 insertions(+), 11 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 33f1106b4f99..29b070106f34 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -918,10 +918,6 @@ void wq_worker_sleeping(struct task_struct *task)
 	}
 
 	/*
-	 * The counterpart of the following dec_and_test, implied mb,
-	 * worklist not empty test sequence is in insert_work().
-	 * Please read comment there.
-	 *
 	 * NOT_RUNNING is clear. This means that we're bound to and
 	 * running on the local cpu w/ rq lock held and preemption
 	 * disabled, which in turn means that none else could be
@@ -1372,13 +1368,6 @@ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
 	list_add_tail(&work->entry, head);
 	get_pwq(pwq);
 
-	/*
-	 * Ensure either wq_worker_sleeping() sees the above
-	 * list_add_tail() or we see zero nr_running to avoid workers lying
-	 * around lazily while there are works to be processed.
-	 */
-	smp_mb();
-
 	if (__need_more_worker(pool))
 		wake_up_worker(pool);
 }
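
The reasoning in the commit message (both the enqueue in insert_work() and the worklist check in wq_worker_sleeping() run under pool->lock, so the explicit smp_mb() pairing adds nothing) can be illustrated with a minimal userspace sketch. This is not the kernel code: the names fake_pool, fake_insert_work and fake_worker_sleeping_sees_work are invented for illustration, and a pthread mutex stands in for the raw spinlock. The point is only that the lock's acquire/release ordering already guarantees the sleeper cannot miss work queued before it took the lock, with no explicit barrier anywhere.

	/*
	 * Illustrative userspace analogue only, not kernel code.
	 * Both paths take the same lock, so the lock's acquire/release
	 * semantics order the worklist update against the nr_running
	 * check -- the reason the commit removes the smp_mb() pair.
	 */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct fake_pool {
		pthread_mutex_t lock;	/* stands in for pool->lock */
		int nr_running;		/* stands in for pool->nr_running */
		int worklist_len;	/* stands in for the pool worklist */
	};

	/* Producer side: queue work while holding the lock (cf. insert_work()). */
	static void fake_insert_work(struct fake_pool *pool)
	{
		pthread_mutex_lock(&pool->lock);
		pool->worklist_len++;	/* cf. list_add_tail(&work->entry, head) */
		/* no explicit barrier: the unlock below is a release */
		pthread_mutex_unlock(&pool->lock);
	}

	/* Sleeper side: decide whether another worker must be woken,
	 * while holding the lock (cf. wq_worker_sleeping()). */
	static bool fake_worker_sleeping_sees_work(struct fake_pool *pool)
	{
		bool need_wakeup;

		pthread_mutex_lock(&pool->lock);
		pool->nr_running--;
		/* the lock acquisition is an acquire: any worklist update made
		 * before we got the lock is guaranteed to be visible here */
		need_wakeup = (pool->nr_running == 0 && pool->worklist_len > 0);
		pthread_mutex_unlock(&pool->lock);

		return need_wakeup;
	}

	int main(void)
	{
		struct fake_pool pool = {
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.nr_running = 1,
			.worklist_len = 0,
		};

		fake_insert_work(&pool);
		printf("wake a worker: %s\n",
		       fake_worker_sleeping_sees_work(&pool) ? "yes" : "no");
		return 0;
	}

Build with `cc -pthread`. In the real workqueue code the wakeup decision and the idle-worker handling are more involved, but the ordering argument is the same: once both sides serialize on pool->lock, the removed smp_mb() and the "implied mb" of the dec_and_test it was paired with are redundant for this particular access.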