diff options
author | Matthew Dobson <colpatch@us.ibm.com> | 2005-01-07 21:44:51 -0800 |
---|---|---|
committer | Linus Torvalds <torvalds@evo.osdl.org> | 2005-01-07 21:44:51 -0800 |
commit | fbdaac3d81774c2003ac0025d33dd1142ab3d1b3 (patch) | |
tree | 632b877c19f48a11e3bf9e889b893654e388539f /kernel | |
parent | 86b28714e0d56c203a4c8cfccc37535fcc073842 (diff) | |
download | history-fbdaac3d81774c2003ac0025d33dd1142ab3d1b3.tar.gz |
[PATCH] sched: active_load_balance() fixlet
There is a small problem with the active_load_balance() patch that Darren
sent out last week. As soon as we discover a potential 'target_cpu' from
'cpu_group' to try to push tasks to, we cease considering other CPUs in
that group as potential 'target_cpu's. We break out of the
for_each_cpu_mask() loop and try to push tasks to that CPU. The problem is
that there may well be other idle cpus in that group that we should also
try to push tasks to. Here is a patch to fix that small problem. The
solution is to simply move the code that tries to push the tasks into the
for_each_cpu_mask() loop and do away with the whole 'target_cpu' thing
entirely. Compiled & booted on a 16-way x440.
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched.c | 65 |
1 file changed, 32 insertions, 33 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index d66bdb9bb2784a..b2c8d52ad8eb34 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2122,7 +2122,9 @@ static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu) { struct sched_domain *sd; struct sched_group *cpu_group; + runqueue_t *target_rq; cpumask_t visited_cpus; + int cpu; schedstat_inc(busiest_rq, alb_cnt); /* @@ -2131,46 +2133,43 @@ static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu) */ visited_cpus = CPU_MASK_NONE; for_each_domain(busiest_cpu, sd) { - if (!(sd->flags & SD_LOAD_BALANCE) || busiest_rq->nr_running <= 1) - break; /* no more domains to search or no more tasks to move */ + if (!(sd->flags & SD_LOAD_BALANCE)) + /* no more domains to search */ + break; cpu_group = sd->groups; - do { /* sched_groups should either use list_heads or be merged into the domains structure */ - int cpu, target_cpu = -1; - runqueue_t *target_rq; - + do { for_each_cpu_mask(cpu, cpu_group->cpumask) { - if (cpu_isset(cpu, visited_cpus) || cpu == busiest_cpu || - !cpu_and_siblings_are_idle(cpu)) { - cpu_set(cpu, visited_cpus); + if (busiest_rq->nr_running <= 1) + /* no more tasks left to move */ + return; + if (cpu_isset(cpu, visited_cpus)) + continue; + cpu_set(cpu, visited_cpus); + if (!cpu_and_siblings_are_idle(cpu) || cpu == busiest_cpu) continue; - } - target_cpu = cpu; - break; - } - if (target_cpu == -1) - goto next_group; /* failed to find a suitable target cpu in this domain */ - - target_rq = cpu_rq(target_cpu); - /* - * This condition is "impossible", if it occurs we need to fix it - * Reported by Bjorn Helgaas on a 128-cpu setup. 
- */ - BUG_ON(busiest_rq == target_rq); - - /* move a task from busiest_rq to target_rq */ - double_lock_balance(busiest_rq, target_rq); - if (move_tasks(target_rq, target_cpu, busiest_rq, 1, sd, SCHED_IDLE)) { - schedstat_inc(busiest_rq, alb_lost); - schedstat_inc(target_rq, alb_gained); - } else { - schedstat_inc(busiest_rq, alb_failed); + target_rq = cpu_rq(cpu); + /* + * This condition is "impossible", if it occurs + * we need to fix it. Originally reported by + * Bjorn Helgaas on a 128-cpu setup. + */ + BUG_ON(busiest_rq == target_rq); + + /* move a task from busiest_rq to target_rq */ + double_lock_balance(busiest_rq, target_rq); + if (move_tasks(target_rq, cpu, busiest_rq, + 1, sd, SCHED_IDLE)) { + schedstat_inc(busiest_rq, alb_lost); + schedstat_inc(target_rq, alb_gained); + } else { + schedstat_inc(busiest_rq, alb_failed); + } + spin_unlock(&target_rq->lock); } - spin_unlock(&target_rq->lock); -next_group: cpu_group = cpu_group->next; - } while (cpu_group != sd->groups && busiest_rq->nr_running > 1); + } while (cpu_group != sd->groups); } } |