author    | Lai Jiangshan <laijs@linux.alibaba.com> | 2021-12-23 20:31:40 +0800
committer | Tejun Heo <tj@kernel.org>               | 2022-01-12 07:46:36 -1000
commit    | bc35f7ef96284b8c963991357a9278a6beafca54
tree      | 40b665cfe0a8f6539bcd094aa6d21aa26078320d /kernel/workqueue.c
parent    | cc5bff38463e0894dd596befa99f9d6860e15f5e
workqueue: Convert the type of pool->nr_running to int
nr_running is only modified on the associated CPU, so it does not need to be atomic.
tj: Comment updated.
Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
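
The invariant the patch relies on is worth restating: all writers of nr_running run in process context on the pool's CPU, either with preemption disabled (increment) or with pool->lock held (decrement and reset), so writes never race with each other, and any reader holding pool->lock is guaranteed to observe whether the counter reached zero. Below is a minimal userspace sketch of that protocol, with hypothetical names (struct pool, worker_running(), worker_sleeping()) and a pthread mutex standing in for pool->lock; since pthreads cannot express "runs on one CPU with preemption disabled", the sketch takes the lock on the increment path too, which the kernel deliberately avoids.

```c
/*
 * Hypothetical userspace analogue of the pool->nr_running protocol.
 * Unlike the kernel, every access here goes through the lock, because
 * pthreads has no equivalent of "modified only on one CPU with
 * preemption disabled" that would make a lockless increment safe.
 */
#include <pthread.h>
#include <stdio.h>

struct pool {
	pthread_mutex_t lock;	/* stands in for pool->lock */
	int nr_running;		/* plain int: all accesses serialized here */
};

/* Roughly wq_worker_running(): one more worker is now burning CPU. */
static void worker_running(struct pool *pool)
{
	pthread_mutex_lock(&pool->lock);
	pool->nr_running++;	/* the kernel does this locklessly, preemption off */
	pthread_mutex_unlock(&pool->lock);
}

/*
 * Roughly wq_worker_sleeping(): drop the count under the lock.  Any
 * reader must take the same lock, so "did it reach zero?" is a plain
 * comparison; no atomic_dec_and_test() is needed.
 */
static void worker_sleeping(struct pool *pool)
{
	pthread_mutex_lock(&pool->lock);
	pool->nr_running--;
	if (pool->nr_running == 0)
		printf("pool idle: would wake another worker\n");
	pthread_mutex_unlock(&pool->lock);
}

int main(void)
{
	struct pool pool = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.nr_running = 0,
	};

	worker_running(&pool);
	worker_sleeping(&pool);	/* counter drops to zero under the lock */
	return 0;
}
```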
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 29
1 file changed, 16 insertions(+), 13 deletions(-)
```diff
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 69cbe9e62bf1..835d25e65bb2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -154,8 +154,13 @@ struct worker_pool {
 
 	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
 
-	/* The current concurrency level. */
-	atomic_t		nr_running;
+	/*
+	 * The counter is incremented in a process context on the associated CPU
+	 * w/ preemption disabled, and decremented or reset in the same context
+	 * but w/ pool->lock held. The readers grab pool->lock and are
+	 * guaranteed to see if the counter reached zero.
+	 */
+	int			nr_running;
 
 	struct list_head	worklist;	/* L: list of pending works */
 
@@ -777,7 +782,7 @@ static bool work_is_canceling(struct work_struct *work)
 
 static bool __need_more_worker(struct worker_pool *pool)
 {
-	return !atomic_read(&pool->nr_running);
+	return !pool->nr_running;
 }
 
 /*
@@ -802,8 +807,7 @@ static bool may_start_working(struct worker_pool *pool)
 /* Do I need to keep working? Called from currently running workers. */
 static bool keep_working(struct worker_pool *pool)
 {
-	return !list_empty(&pool->worklist) &&
-		atomic_read(&pool->nr_running) <= 1;
+	return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
 }
 
 /* Do we need a new worker? Called from manager. */
@@ -873,7 +877,7 @@ void wq_worker_running(struct task_struct *task)
 	 */
 	preempt_disable();
 	if (!(worker->flags & WORKER_NOT_RUNNING))
-		atomic_inc(&worker->pool->nr_running);
+		worker->pool->nr_running++;
 	preempt_enable();
 	worker->sleeping = 0;
 }
@@ -917,8 +921,8 @@ void wq_worker_sleeping(struct task_struct *task)
 		return;
 	}
 
-	if (atomic_dec_and_test(&pool->nr_running) &&
-	    !list_empty(&pool->worklist))
+	pool->nr_running--;
+	if (need_more_worker(pool))
 		wake_up_worker(pool);
 	raw_spin_unlock_irq(&pool->lock);
 }
@@ -973,7 +977,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags)
 	/* If transitioning into NOT_RUNNING, adjust nr_running. */
 	if ((flags & WORKER_NOT_RUNNING) &&
 	    !(worker->flags & WORKER_NOT_RUNNING)) {
-		atomic_dec(&pool->nr_running);
+		pool->nr_running--;
 	}
 
 	worker->flags |= flags;
@@ -1005,7 +1009,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 	 */
 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 		if (!(worker->flags & WORKER_NOT_RUNNING))
-			atomic_inc(&pool->nr_running);
+			pool->nr_running++;
 }
 
 /**
@@ -1806,8 +1810,7 @@ static void worker_enter_idle(struct worker *worker)
 		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
 	/* Sanity check nr_running. */
-	WARN_ON_ONCE(pool->nr_workers == pool->nr_idle &&
-		     atomic_read(&pool->nr_running));
+	WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
 }
 
 /**
@@ -4985,7 +4988,7 @@ static void unbind_workers(int cpu)
 		 * an unbound (in terms of concurrency management) pool which
 		 * are served by workers tied to the pool.
 		 */
-		atomic_set(&pool->nr_running, 0);
+		pool->nr_running = 0;
 
 		/*
 		 * With concurrency management just turned off, a busy
```
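A simplification falls out of the wq_worker_sleeping() hunk: because the decrement now happens with pool->lock already held, the open-coded atomic_dec_and_test() plus list_empty() pair collapses into a plain decrement followed by the generic need_more_worker() check, which reads both nr_running and the worklist under that same lock.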