author    Andrew Theurer <habanero@us.ibm.com>  2005-01-07 21:44:05 -0800
committer Linus Torvalds <torvalds@evo.osdl.org>  2005-01-07 21:44:05 -0800
commit    f16c759b750250439dd769183d2a41174bb18519 (patch)
tree      3bd693cca9f0f3506480bbb50e14a6b35c426417 /kernel
parent    39a488d12d04e804506096c8d5333cc4a9b2db11 (diff)
[PATCH] sched: more aggressive wake_idle()
This patch addresses some problems with wake_idle(). Currently wake_idle() will wake a task on an alternate cpu if:

1) task->cpu is not idle
2) an idle cpu can be found

However, the span of cpus to search is very limited (only task->cpu's siblings). The scheduler should find the closest idle cpu, starting with the lowest-level domain and moving on to higher-level domains as long as that is allowed (the domain has the SD_WAKE_IDLE flag set). This patch does exactly that.

This and the other two patches (also to be submitted) combined have provided as much as a 5% improvement on an "online transaction DB workload" and 2% on the industry-standard J2EE workload. I asked Martin Bligh to test these for regressions, and he did not find any. I would like to submit this for inclusion in -mm and, barring any problems, eventually to mainline.

Signed-off-by: <habanero@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
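To make the search order concrete, here is a small userspace sketch of the walk the patch introduces; this is not the kernel code. The struct domain, the span/allowed bitmasks, NR_CPUS, and the idle_cpu() stub are simplified stand-ins, with only SD_WAKE_IDLE taken from the real scheduler. The walk starts at the cpu's smallest domain, moves to ever-larger parent domains, stops at the first domain that does not set SD_WAKE_IDLE, and returns the first idle cpu found in the closest span:

/* Toy userspace model of the new wake_idle() search order -- not the
 * kernel code.  Domains form a parent chain from the smallest span
 * (e.g. SMT siblings) to the largest (e.g. the whole machine). */
#include <stdio.h>
#include <stdbool.h>

#define SD_WAKE_IDLE	0x1
#define NR_CPUS		8

struct domain {
	unsigned long span;	/* bitmask of cpus covered by this domain */
	int flags;
	struct domain *parent;	/* next-larger domain, NULL at the top */
};

/* stand-in for the kernel's idle_cpu(): here only cpu 2 is idle */
static bool idle_cpu(int cpu)
{
	return cpu == 2;
}

static int wake_idle(int cpu, struct domain *sd, unsigned long allowed)
{
	if (idle_cpu(cpu))
		return cpu;
	/* walk outward, closest (smallest) domain first */
	for (; sd; sd = sd->parent) {
		if (!(sd->flags & SD_WAKE_IDLE))
			break;	/* a wider search is not permitted */
		unsigned long tmp = sd->span & allowed;
		for (int i = 0; i < NR_CPUS; i++)
			if ((tmp & (1UL << i)) && idle_cpu(i))
				return i; /* first idle cpu in closest span */
	}
	return cpu;	/* nothing idle found: stay where we are */
}

int main(void)
{
	/* cpu 0's sibling domain spans {0,1}; its parent spans {0..3} */
	struct domain phys = { .span = 0xf, .flags = SD_WAKE_IDLE, .parent = NULL };
	struct domain smt  = { .span = 0x3, .flags = SD_WAKE_IDLE, .parent = &phys };

	printf("wake on cpu %d\n", wake_idle(0, &smt, 0xf)); /* prints 2 */
	return 0;
}

In the kernel itself this parent walk is what the for_each_domain(cpu, sd) iterator in the diff below performs over struct sched_domain.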
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c  30
1 file changed, 15 insertions, 15 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 9f1e93a2bf0efb..a017d690dbfa47 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -935,9 +935,10 @@ static inline unsigned long target_load(int cpu)
 #endif
 
 /*
- * wake_idle() is useful especially on SMT architectures to wake a
- * task onto an idle sibling if we would otherwise wake it onto a
- * busy sibling.
+ * wake_idle() will wake a task on an idle cpu if task->cpu is
+ * not idle and an idle cpu is available. The span of cpus to
+ * search starts with cpus closest then further out as needed,
+ * so we always favor a closer, idle cpu.
  *
  * Returns the CPU we should wake onto.
  */
@@ -945,24 +946,23 @@ static inline unsigned long target_load(int cpu)
 static int wake_idle(int cpu, task_t *p)
 {
 	cpumask_t tmp;
-	runqueue_t *rq = cpu_rq(cpu);
 	struct sched_domain *sd;
 	int i;
 
 	if (idle_cpu(cpu))
 		return cpu;
 
-	sd = rq->sd;
-	if (!(sd->flags & SD_WAKE_IDLE))
-		return cpu;
-
-	cpus_and(tmp, sd->span, p->cpus_allowed);
-
-	for_each_cpu_mask(i, tmp) {
-		if (idle_cpu(i))
-			return i;
+	for_each_domain(cpu, sd) {
+		if (sd->flags & SD_WAKE_IDLE) {
+			cpus_and(tmp, sd->span, cpu_online_map);
+			cpus_and(tmp, tmp, p->cpus_allowed);
+			for_each_cpu_mask(i, tmp) {
+				if (idle_cpu(i))
+					return i;
+			}
+		}
+		else break;
 	}
-
 	return cpu;
 }
 #else
@@ -1074,7 +1074,7 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync)
 out_set_cpu:
 	schedstat_inc(rq, ttwu_attempts);
 	new_cpu = wake_idle(new_cpu, p);
-	if (new_cpu != cpu && cpu_isset(new_cpu, p->cpus_allowed)) {
+	if (new_cpu != cpu) {
 		schedstat_inc(rq, ttwu_moved);
 		set_task_cpu(p, new_cpu);
 		task_rq_unlock(rq, &flags);
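For reference, this is how wake_idle() reads with the patch applied, assembled directly from the hunks above (the surrounding preprocessor context is omitted):

static int wake_idle(int cpu, task_t *p)
{
	cpumask_t tmp;
	struct sched_domain *sd;
	int i;

	if (idle_cpu(cpu))
		return cpu;

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_IDLE) {
			cpus_and(tmp, sd->span, cpu_online_map);
			cpus_and(tmp, tmp, p->cpus_allowed);
			for_each_cpu_mask(i, tmp) {
				if (idle_cpu(i))
					return i;
			}
		}
		else break;
	}
	return cpu;
}

Every idle cpu this version picks is already masked against p->cpus_allowed, which is evidently why the final hunk can drop the extra cpu_isset() check in try_to_wake_up().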