From: Chen Shang

micro-optimize task requeueing in schedule() & clean up recalc_task_prio().

Signed-off-by: Ingo Molnar
Signed-off-by: Andrew Morton
---

 kernel/sched.c |   19 ++++++++++++-------
 1 files changed, 12 insertions(+), 7 deletions(-)

diff -puN kernel/sched.c~sched-micro-optimize-task-requeueing-in-schedule kernel/sched.c
--- 25/kernel/sched.c~sched-micro-optimize-task-requeueing-in-schedule	Mon May 23 16:01:42 2005
+++ 25-akpm/kernel/sched.c	Mon May 23 16:01:42 2005
@@ -675,7 +675,7 @@ static inline void __activate_idle_task(
 	rq->nr_running++;
 }
 
-static void recalc_task_prio(task_t *p, unsigned long long now)
+static int recalc_task_prio(task_t *p, unsigned long long now)
 {
 	/* Caller must always ensure 'now >= p->timestamp' */
 	unsigned long long __sleep_time = now - p->timestamp;
@@ -734,7 +734,7 @@ static void recalc_task_prio(task_t *p,
 		}
 	}
 
-	p->prio = effective_prio(p);
+	return effective_prio(p);
 }
 
 /*
@@ -757,7 +757,7 @@ static void activate_task(task_t *p, run
 	}
 #endif
 
-	recalc_task_prio(p, now);
+	p->prio = recalc_task_prio(p, now);
 
 	/*
 	 * This checks to make sure it's not an uninterruptible task
@@ -2751,7 +2751,7 @@ asmlinkage void __sched schedule(void)
 	struct list_head *queue;
 	unsigned long long now;
 	unsigned long run_time;
-	int cpu, idx;
+	int cpu, idx, new_prio;
 
 	/*
 	 * Test if we are atomic. Since do_exit() needs to call into
@@ -2873,9 +2873,14 @@ go_idle:
 			delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
 
 		array = next->array;
-		dequeue_task(next, array);
-		recalc_task_prio(next, next->timestamp + delta);
-		enqueue_task(next, array);
+		new_prio = recalc_task_prio(next, next->timestamp + delta);
+
+		if (unlikely(next->prio != new_prio)) {
+			dequeue_task(next, array);
+			next->prio = new_prio;
+			enqueue_task(next, array);
+		} else
+			requeue_task(next, array);
 	}
 	next->activated = 0;
switch_tasks:
_
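
A note for readers who don't have the O(1) scheduler's data structures in
their head: the sketch below is a standalone, deliberately simplified model,
not kernel code. The struct task / struct prio_array types, the MAX_PRIO
value and the demo in main() are illustrative assumptions only; the real
scheduler uses task_t, prio_array_t, list_head lists and a priority bitmap.
It shows why the patched fast path is cheaper: when recalc_task_prio() comes
back with an unchanged priority, requeue_task() merely moves the task to the
tail of the list it is already on, whereas the old code always paid for a
full dequeue_task()/enqueue_task() cycle, which in the real scheduler also
touches the per-array accounting and the priority bitmap.

/*
 * Standalone sketch, NOT kernel code: a simplified model of the O(1)
 * scheduler's per-priority run lists.  Types, MAX_PRIO and the demo in
 * main() are illustrative assumptions; the real kernel uses task_t,
 * prio_array_t, list_head and a priority bitmap.
 */
#include <stdio.h>

#define MAX_PRIO 140

struct task {
	int prio;
	struct task *next, *prev;	/* circular doubly-linked list links */
};

struct prio_array {
	int nr_active;
	struct task queue[MAX_PRIO];	/* one list head per priority level */
};

static void init_array(struct prio_array *array)
{
	array->nr_active = 0;
	for (int i = 0; i < MAX_PRIO; i++)
		array->queue[i].next = array->queue[i].prev = &array->queue[i];
}

/* Unlink from the current list and drop the active count; the real
 * dequeue_task() may also have to clear a priority-bitmap bit. */
static void dequeue_task(struct task *p, struct prio_array *array)
{
	p->prev->next = p->next;
	p->next->prev = p->prev;
	array->nr_active--;
}

/* Link at the tail of the list for p->prio and bump the active count;
 * the real enqueue_task() also sets the priority-bitmap bit. */
static void enqueue_task(struct task *p, struct prio_array *array)
{
	struct task *head = &array->queue[p->prio];

	p->prev = head->prev;
	p->next = head;
	head->prev->next = p;
	head->prev = p;
	array->nr_active++;
}

/* Move to the tail of the list the task is already on: pure pointer
 * surgery, no accounting or bitmap updates needed at all. */
static void requeue_task(struct task *p, struct prio_array *array)
{
	struct task *head = &array->queue[p->prio];

	p->prev->next = p->next;
	p->next->prev = p->prev;
	p->prev = head->prev;
	p->next = head;
	head->prev->next = p;
	head->prev = p;
}

int main(void)
{
	struct prio_array array;
	struct task t = { .prio = 120 };
	int new_prio;

	init_array(&array);
	enqueue_task(&t, &array);

	/* Stand-in for recalc_task_prio(); pretend the recomputed
	 * priority came back unchanged, which the patch treats as the
	 * common case. */
	new_prio = 120;

	if (t.prio != new_prio) {
		/* Priority changed: pay for the full dequeue/enqueue. */
		dequeue_task(&t, &array);
		t.prio = new_prio;
		enqueue_task(&t, &array);
	} else
		/* Same priority: the cheap path the patch adds. */
		requeue_task(&t, &array);

	printf("nr_active=%d, prio=%d\n", array.nr_active, t.prio);
	return 0;
}

The unlikely() annotation in the patch is the kernel's __builtin_expect()
wrapper, so the compiler lays out straight-line code for the expected common
case: a task about to be switched to usually keeps the priority it already
had.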