sched/fair: Clean up update_sg_lb_stats() a bit
Add rq->nr_running to sgs->sum_nr_running directly instead of
assigning it through an intermediate variable nr_running.

Signed-off-by: Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1384508212-25032-1-git-send-email-kamalesh@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 380c9077b3
parent c44f2a0200
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5500,7 +5500,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 			struct sched_group *group, int load_idx,
 			int local_group, struct sg_lb_stats *sgs)
 {
-	unsigned long nr_running;
 	unsigned long load;
 	int i;
 
@@ -5509,8 +5508,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
-		nr_running = rq->nr_running;
-
 		/* Bias balancing toward cpus of our domain */
 		if (local_group)
 			load = target_load(i, load_idx);
@@ -5518,7 +5515,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 			load = source_load(i, load_idx);
 
 		sgs->group_load += load;
-		sgs->sum_nr_running += nr_running;
+		sgs->sum_nr_running += rq->nr_running;
 #ifdef CONFIG_NUMA_BALANCING
 		sgs->nr_numa_running += rq->nr_numa_running;
 		sgs->nr_preferred_running += rq->nr_preferred_running;
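For illustration only, here is a minimal, self-contained C sketch (not the kernel source) of the pattern this commit applies: when a field is copied into a local variable that is used exactly once, the copy can be dropped and the field read directly at its single use site. The types rq_stub and sg_stats_stub and the function accumulate() below are hypothetical stand-ins for the kernel's rq and sg_lb_stats structures.

#include <stdio.h>

/* Hypothetical stand-in for the per-cpu runqueue. */
struct rq_stub {
	unsigned long nr_running;
};

/* Hypothetical stand-in for the sched-group load-balance stats. */
struct sg_stats_stub {
	unsigned long sum_nr_running;
};

static void accumulate(struct sg_stats_stub *sgs,
		       const struct rq_stub *rqs, int n)
{
	for (int i = 0; i < n; i++) {
		/*
		 * Before the cleanup the loop body read:
		 *	unsigned long nr_running = rqs[i].nr_running;
		 *	sgs->sum_nr_running += nr_running;
		 * Reading the field directly is behaviourally
		 * identical and removes one local variable.
		 */
		sgs->sum_nr_running += rqs[i].nr_running;
	}
}

int main(void)
{
	struct rq_stub rqs[] = { { 2 }, { 3 }, { 5 } };
	struct sg_stats_stub sgs = { 0 };

	accumulate(&sgs, rqs, 3);
	printf("sum_nr_running = %lu\n", sgs.sum_nr_running); /* 10 */
	return 0;
}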