authorKamalesh Babulal <kamalesh@linux.vnet.ibm.com>2013-11-15 15:06:52 +0530
committerIngo Molnar <mingo@kernel.org>2013-11-27 13:50:57 +0100
commit380c9077b38df2962a22f00f21f6cd0db62d3390 (patch)
tree1f6ab35a05616e8de2f0fc9208be3d7c21f9e305 /kernel/sched
parentc44f2a020072d75d6b0cbf9f139a09719cda9367 (diff)
sched/fair: Clean up update_sg_lb_stats() a bit
Add rq->nr_running to sgs->sum_nr_running directly instead of
assigning it through an intermediate variable nr_running.

Signed-off-by: Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1384508212-25032-1-git-send-email-kamalesh@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/fair.c  5
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6cb36c7ea391..a566c0739f77 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5500,7 +5500,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 			struct sched_group *group, int load_idx,
 			int local_group, struct sg_lb_stats *sgs)
 {
-	unsigned long nr_running;
 	unsigned long load;
 	int i;
 
@@ -5509,8 +5508,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
-		nr_running = rq->nr_running;
-
 		/* Bias balancing toward cpus of our domain */
 		if (local_group)
 			load = target_load(i, load_idx);
@@ -5518,7 +5515,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 			load = source_load(i, load_idx);
 
 		sgs->group_load += load;
-		sgs->sum_nr_running += nr_running;
+		sgs->sum_nr_running += rq->nr_running;
 #ifdef CONFIG_NUMA_BALANCING
 		sgs->nr_numa_running += rq->nr_numa_running;
 		sgs->nr_preferred_running += rq->nr_preferred_running;
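
For illustration only, a minimal self-contained C sketch of the pattern this cleanup applies: the per-CPU running count is accumulated straight from the runqueue field instead of being copied into a temporary first. The struct rq and struct sg_lb_stats below are simplified stand-ins, not the kernel's real types, and this update_sg_lb_stats() models only the summation, not the actual load-balancing statistics.

#include <stdio.h>

/* Simplified stand-ins for the kernel's struct rq and struct sg_lb_stats. */
struct rq {
	unsigned int nr_running;
};

struct sg_lb_stats {
	unsigned long sum_nr_running;
};

/*
 * Accumulate the per-CPU running count the way the patched code does:
 * read rq->nr_running directly, with no intermediate local variable.
 */
static void update_sg_lb_stats(struct sg_lb_stats *sgs,
			       const struct rq *rqs, int nr_cpus)
{
	for (int i = 0; i < nr_cpus; i++)
		sgs->sum_nr_running += rqs[i].nr_running;
}

int main(void)
{
	struct rq rqs[] = { { 2 }, { 0 }, { 3 } };
	struct sg_lb_stats sgs = { 0 };

	update_sg_lb_stats(&sgs, rqs, 3);
	printf("sum_nr_running = %lu\n", sgs.sum_nr_running);	/* prints 5 */
	return 0;
}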