From: Dave Kleikamp

This patch fixes a divide-by-zero error that I hit on a two-way i386
machine.  rq->nr_running is tested to be non-zero, but may change by the
time it is used in the division.  Saving the value to a local variable
ensures that the same value that is checked is used in the division.

Signed-off-by: Dave Kleikamp
Acked-by: Con Kolivas
Signed-off-by: Andrew Morton
---

 kernel/sched.c |   14 ++++++++------
 1 files changed, 8 insertions(+), 6 deletions(-)

diff -puN kernel/sched.c~sched-correct_smp_nice_bias-fix kernel/sched.c
--- 25/kernel/sched.c~sched-correct_smp_nice_bias-fix	Wed Aug 17 14:39:38 2005
+++ 25-akpm/kernel/sched.c	Wed Aug 17 14:39:38 2005
@@ -970,15 +970,16 @@ void kick_process(task_t *p)
 static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
+	unsigned long running = rq->nr_running;
 	unsigned long source_load, cpu_load = rq->cpu_load[type-1],
-		load_now = rq->nr_running * SCHED_LOAD_SCALE;
+		load_now = running * SCHED_LOAD_SCALE;
 
 	if (type == 0)
 		source_load = load_now;
 	else
 		source_load = min(cpu_load, load_now);
 
-	if (rq->nr_running > 1 || (idle == NOT_IDLE && rq->nr_running))
+	if (running > 1 || (idle == NOT_IDLE && running))
 		/*
 		 * If we are busy rebalancing the load is biased by
 		 * priority to create 'nice' support across cpus. When
@@ -987,7 +988,7 @@ static inline unsigned long __source_loa
 		 * prevent idle rebalance from trying to pull tasks from a
 		 * queue with only one running task.
 		 */
-		source_load = source_load * rq->prio_bias / rq->nr_running;
+		source_load = source_load * rq->prio_bias / running;
 
 	return source_load;
 }
@@ -1003,16 +1004,17 @@ static inline unsigned long source_load(
 static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
+	unsigned long running = rq->nr_running;
 	unsigned long target_load, cpu_load = rq->cpu_load[type-1],
-		load_now = rq->nr_running * SCHED_LOAD_SCALE;
+		load_now = running * SCHED_LOAD_SCALE;
 
 	if (type == 0)
 		target_load = load_now;
 	else
 		target_load = max(cpu_load, load_now);
 
-	if (rq->nr_running > 1 || (idle == NOT_IDLE && rq->nr_running))
-		target_load = target_load * rq->prio_bias / rq->nr_running;
+	if (running > 1 || (idle == NOT_IDLE && running))
+		target_load = target_load * rq->prio_bias / running;
 
 	return target_load;
 }
_
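
For reference, a minimal standalone C sketch (not part of the patch) of the
same check-then-divide pattern and the read-once fix.  The struct and
function names below are invented for illustration only, and the sketch does
not model the concurrent updater that makes the race real on SMP; it just
shows why snapshotting the value into a local variable guarantees the tested
value is the one divided by.

	#include <stdio.h>

	struct fake_rq {
		unsigned long nr_running;	/* may be updated by another CPU */
		unsigned long prio_bias;
	};

	/* Racy shape: nr_running could drop to zero between test and divide. */
	static unsigned long biased_load_racy(struct fake_rq *rq, unsigned long load)
	{
		if (rq->nr_running > 1)
			load = load * rq->prio_bias / rq->nr_running;
		return load;
	}

	/* Fixed shape: snapshot once, so the tested and divided values match. */
	static unsigned long biased_load_fixed(struct fake_rq *rq, unsigned long load)
	{
		unsigned long running = rq->nr_running;

		if (running > 1)
			load = load * rq->prio_bias / running;
		return load;
	}

	int main(void)
	{
		struct fake_rq rq = { .nr_running = 3, .prio_bias = 120 };

		printf("racy:  %lu\n", biased_load_racy(&rq, 3 * 128));
		printf("fixed: %lu\n", biased_load_fixed(&rq, 3 * 128));
		return 0;
	}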