From: John Hawkes

Here is a patch against 2.6.1-mm3 that I believe restores the "sched_clock
fix" from -mm2.  I've also fixed the declaration of timestamp_last_tick to
be "unsigned long long", to match task->timestamp and sched_clock().

---

 25-akpm/kernel/sched.c |   16 ++++----------
 1 files changed, 6 insertions(+), 10 deletions(-)

diff -puN kernel/sched.c~sched-clock-fixes kernel/sched.c
--- 25/kernel/sched.c~sched-clock-fixes	Thu Mar 11 14:33:25 2004
+++ 25-akpm/kernel/sched.c	Thu Mar 11 14:33:25 2004
@@ -204,8 +204,8 @@ struct prio_array {
 struct runqueue {
 	spinlock_t lock;
 	unsigned long long nr_switches;
-	unsigned long nr_running, expired_timestamp, nr_uninterruptible,
-		timestamp_last_tick;
+	unsigned long nr_running, expired_timestamp, nr_uninterruptible;
+	unsigned long long timestamp_last_tick;
 	task_t *curr, *idle;
 	struct mm_struct *prev_mm;
 	prio_array_t *active, *expired, arrays[2];
@@ -1252,8 +1252,7 @@ static inline void pull_task(runqueue_t
  */
 static inline
 int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
-		unsigned long long now, struct sched_domain *domain,
-		enum idle_type idle)
+		struct sched_domain *domain, enum idle_type idle)
 {
 	/*
 	 * We do not migrate tasks that are:
@@ -1269,8 +1268,8 @@ int can_migrate_task(task_t *p, runqueue
 	/* Aggressive migration if we've failed balancing */
 	if (idle == NEWLY_IDLE ||
 			domain->nr_balance_failed < domain->cache_nice_tries) {
-		unsigned long long delta = now - p->timestamp;
-		if (delta < domain->cache_hot_time)
+		if ((rq->timestamp_last_tick - p->timestamp)
+				< domain->cache_hot_time)
 			return 0;
 	}
 
@@ -1288,7 +1287,6 @@ static int move_tasks(runqueue_t *this_r
 		unsigned long max_nr_move, struct sched_domain *domain,
 		enum idle_type idle)
 {
-	unsigned long long now;
 	int idx;
 	int pulled = 0;
 	prio_array_t *array, *dst_array;
@@ -1298,8 +1296,6 @@ static int move_tasks(runqueue_t *this_r
 	if (max_nr_move <= 0 || busiest->nr_running <= 1)
 		goto out;
 
-	now = sched_clock();
-
 	/*
 	 * We first consider expired tasks. Those will likely not be
 	 * executed in the near future, and they are most likely to
@@ -1338,7 +1334,7 @@ skip_queue:
 
 	curr = curr->prev;
 
-	if (!can_migrate_task(tmp, busiest, this_cpu, now, domain, idle)) {
+	if (!can_migrate_task(tmp, busiest, this_cpu, domain, idle)) {
 		if (curr != head)
 			goto skip_queue;
 		idx++;
_