author		Andrew Theurer <habanero@us.ibm.com>	2005-01-07 21:44:22 -0800
committer	Linus Torvalds <torvalds@evo.osdl.org>	2005-01-07 21:44:22 -0800
commit		6a991f77ef744253cd108d9442eeb85c734cb489 (patch)
tree		edf0b4463bd33db8c98cd4fb9f9343cff7d6e82b /kernel
parent		f16c759b750250439dd769183d2a41174bb18519 (diff)
download	history-6a991f77ef744253cd108d9442eeb85c734cb489.tar.gz
[PATCH] sched: can_migrate exception for idle cpus
Fix can_migrate to allow aggressive stealing for idle cpus. This -was- in
mainline, but I believe the sched_domains work blasted it out of there. IMO,
it's a no-brainer for an idle cpu (with all that cache going to waste) to be
allowed to steal a task. The one enhancement I have made is to make sure the
whole cpu (all SMT siblings) is idle.

Signed-off-by: Andrew Theurer <habanero@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
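For illustration only (not part of the patch): a minimal userspace sketch of
the new decision order, with a hypothetical siblings[] table standing in for
the kernel's cpu_sibling_map[] and a plain cpu_idle[] array standing in for
idle_cpu() and the runqueue state.

/*
 * Illustrative sketch only, not kernel code: models the patched
 * can_migrate_task() ordering. siblings[] is a made-up stand-in for
 * cpu_sibling_map[]; cpu_idle[] stands in for idle_cpu().
 */
#include <stdio.h>

#define NR_CPUS		4
#define NR_SIBLINGS	2

static int cpu_idle[NR_CPUS] = { 1, 1, 0, 0 };	/* 1 = idle */
static int siblings[NR_CPUS][NR_SIBLINGS] = {	/* SMT pairs */
	{ 0, 1 }, { 0, 1 }, { 2, 3 }, { 2, 3 },
};

/* All siblings of 'cpu' (including cpu itself) must be idle. */
static int cpu_and_siblings_are_idle(int cpu)
{
	int i;
	for (i = 0; i < NR_SIBLINGS; i++)
		if (!cpu_idle[siblings[cpu][i]])
			return 0;
	return 1;
}

/* Mirrors the patched order: aggressive cases first, cache check last. */
static int can_migrate(int this_cpu, int nr_balance_failed,
		       int cache_nice_tries, int task_is_hot)
{
	if (cpu_and_siblings_are_idle(this_cpu) ||
	    nr_balance_failed > cache_nice_tries)
		return 1;	/* steal regardless of cache warmth */
	if (task_is_hot)
		return 0;	/* leave the cache-hot task alone */
	return 1;
}

int main(void)
{
	/* cpu 0 and its sibling are idle: steal even a cache-hot task. */
	printf("idle core, hot task -> %d\n", can_migrate(0, 0, 2, 1));
	/* cpu 2's sibling is busy: the cache-hot check still applies. */
	printf("busy core, hot task -> %d\n", can_migrate(2, 0, 2, 1));
	return 0;
}

Compiled standalone, this prints 1 then 0: a fully idle cpu steals even a
cache-hot task, while a cpu with a busy sibling still defers to the
task_hot() check.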
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	50
1 file changed, 27 insertions(+), 23 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index a017d690dbfa47..d66bdb9bb2784a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -455,6 +455,22 @@ static inline void rq_unlock(runqueue_t *rq)
	spin_unlock_irq(&rq->lock);
}
+#ifdef CONFIG_SCHED_SMT
+static int cpu_and_siblings_are_idle(int cpu)
+{
+	int sib;
+	for_each_cpu_mask(sib, cpu_sibling_map[cpu]) {
+		if (idle_cpu(sib))
+			continue;
+		return 0;
+	}
+
+	return 1;
+}
+#else
+#define cpu_and_siblings_are_idle(A) idle_cpu(A)
+#endif
+
#ifdef CONFIG_SCHEDSTATS
/*
 * Called when a process is dequeued from the active array and given
@@ -1668,13 +1684,18 @@ int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
	if (!cpu_isset(this_cpu, p->cpus_allowed))
		return 0;
-	/* Aggressive migration if we've failed balancing */
-	if (idle == NEWLY_IDLE ||
-	    sd->nr_balance_failed < sd->cache_nice_tries) {
-		if (task_hot(p, rq->timestamp_last_tick, sd))
-			return 0;
-	}
+	/*
+	 * Aggressive migration if:
+	 * 1) the [whole] cpu is idle, or
+	 * 2) too many balance attempts have failed.
+	 */
+	if (cpu_and_siblings_are_idle(this_cpu) ||
+	    sd->nr_balance_failed > sd->cache_nice_tries)
+		return 1;
+
+	if (task_hot(p, rq->timestamp_last_tick, sd))
+		return 0;
	return 1;
}
@@ -2089,23 +2110,6 @@ static inline void idle_balance(int this_cpu, runqueue_t *this_rq)
	}
}
-#ifdef CONFIG_SCHED_SMT
-static int cpu_and_siblings_are_idle(int cpu)
-{
-	int sib;
-	for_each_cpu_mask(sib, cpu_sibling_map[cpu]) {
-		if (idle_cpu(sib))
-			continue;
-		return 0;
-	}
-
-	return 1;
-}
-#else
-#define cpu_and_siblings_are_idle(A) idle_cpu(A)
-#endif
-
-
/*
 * active_load_balance is run by migration threads. It pushes running tasks
 * off the busiest CPU onto idle CPUs. It requires at least 1 task to be