From: Nick Piggin It removes some code that slightly shortcuts __activate_task in one case, at the expense of possibly giving a newly forked process the wrong priority for its first timeslice. But by the time you account for the extra couple of icache lines it requires, there is probably little improvement. It also restores a couple of if (TASK_PREEMPTS_CURR) resched_task points in the wake_up_forked paths that got lost. --- 25-akpm/kernel/sched.c | 31 +++++++------------------------ 1 files changed, 7 insertions(+), 24 deletions(-) diff -puN kernel/sched.c~sched-fixes kernel/sched.c --- 25/kernel/sched.c~sched-fixes 2004-04-18 08:30:23.688497968 -0700 +++ 25-akpm/kernel/sched.c 2004-04-18 08:31:20.197907232 -0700 @@ -941,15 +941,10 @@ void fastcall wake_up_forked_process(tas p->prio = effective_prio(p); set_task_cpu(p, smp_processor_id()); - if (unlikely(!current->array)) - __activate_task(p, rq); - else { - p->prio = current->prio; - list_add_tail(&p->run_list, &current->run_list); - p->array = current->array; - p->array->nr_active++; - rq->nr_running++; - } + __activate_task(p, rq); + if (TASK_PREEMPTS_CURR(p, rq)) + resched_task(rq->curr); + task_rq_unlock(rq, &flags); } @@ -1262,21 +1257,9 @@ lock_again: p->prio = effective_prio(p); set_task_cpu(p, cpu); - if (cpu == this_cpu) { - if (unlikely(!current->array)) - __activate_task(p, rq); - else { - p->prio = current->prio; - list_add_tail(&p->run_list, &current->run_list); - p->array = current->array; - p->array->nr_active++; - rq->nr_running++; - } - } else { - __activate_task(p, rq); - if (TASK_PREEMPTS_CURR(p, rq)) - resched_task(rq->curr); - } + __activate_task(p, rq); + if (TASK_PREEMPTS_CURR(p, rq)) + resched_task(rq->curr); double_rq_unlock(this_rq, rq); local_irq_restore(flags); _