From: Ingo Molnar

relax synchronization of sched_clock()

 kernel/sched.c |    9 +++++++--
 1 files changed, 7 insertions(+), 2 deletions(-)

diff -puN kernel/sched.c~sched-clock-2.6.0-A1 kernel/sched.c
--- 25/kernel/sched.c~sched-clock-2.6.0-A1	2003-12-30 17:05:37.000000000 -0800
+++ 25-akpm/kernel/sched.c	2003-12-30 17:05:37.000000000 -0800
@@ -199,7 +199,7 @@ struct prio_array {
 struct runqueue {
 	spinlock_t lock;
 	unsigned long nr_running, nr_switches, expired_timestamp,
-			nr_uninterruptible;
+			nr_uninterruptible, timestamp_last_tick;
 	task_t *curr, *idle;
 	struct mm_struct *prev_mm;
 	prio_array_t *active, *expired, arrays[2];
@@ -1136,6 +1136,7 @@ static inline void pull_task(runqueue_t
 	set_task_cpu(p, this_cpu);
 	nr_running_inc(this_rq);
 	enqueue_task(p, this_rq->active);
+	p->timestamp = sched_clock() - (src_rq->timestamp_last_tick - p->timestamp);
 	/*
 	 * Note that idle threads have a prio of MAX_PRIO, for this test
 	 * to be always true for them.
@@ -1156,7 +1157,7 @@ static inline void pull_task(runqueue_t
 static inline int
 can_migrate_task(task_t *tsk, runqueue_t *rq, int this_cpu, int idle)
 {
-	unsigned long delta = sched_clock() - tsk->timestamp;
+	unsigned long delta = rq->timestamp_last_tick - tsk->timestamp;
 
 	if (!idle && (delta <= JIFFIES_TO_NS(cache_decay_ticks)))
 		return 0;
@@ -1364,6 +1365,8 @@ void scheduler_tick(int user_ticks, int
 	runqueue_t *rq = this_rq();
 	task_t *p = current;
 
+	rq->timestamp_last_tick = sched_clock();
+
 	if (rcu_pending(cpu))
 		rcu_check_callbacks(cpu, user_ticks);
 
@@ -2647,6 +2650,8 @@ static void move_task_away(struct task_s
 		if (p->prio < rq_dest->curr->prio)
 			resched_task(rq_dest->curr);
 	}
+	p->timestamp = rq_dest->timestamp_last_tick;
+
 out:
 	double_rq_unlock(this_rq(), rq_dest);
 	local_irq_restore(flags);
_
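
An aside on the pull_task() hunk, for readers following along: the new
assignment rebases a migrated task's timestamp from the source CPU's
sched_clock() domain into the pulling CPU's domain, since per-CPU
sched_clock() values need not be synchronized; each runqueue's new
timestamp_last_tick serves as the per-CPU reference point. The standalone
userspace sketch below walks through that arithmetic only. It is not part of
the patch: fake_sched_clock(), the hard-coded skew, and the ages are
illustrative assumptions, not kernel code.

/*
 * Sketch of the cross-CPU timestamp rebase done by pull_task() above.
 * Everything here is illustrative; only the arithmetic mirrors the patch.
 */
#include <stdio.h>

typedef unsigned long long u64;

/* Pretend per-CPU clocks: CPU1's sched_clock() runs 500ns behind CPU0's. */
static u64 fake_sched_clock(int cpu)
{
	u64 now = 1000000;		/* arbitrary "current" time, in ns */
	return cpu == 0 ? now : now - 500;
}

int main(void)
{
	/* Task last ran on CPU1; its timestamp is in CPU1's time domain. */
	u64 src_timestamp_last_tick = fake_sched_clock(1);	/* src rq tick */
	u64 p_timestamp = src_timestamp_last_tick - 2000;	/* ran 2000ns ago */

	/*
	 * The pull_task() hunk: age the timestamp relative to the source
	 * CPU's last tick, then re-express it in the pulling CPU's clock,
	 * so can_migrate_task()'s cache-decay check stays meaningful.
	 */
	u64 age = src_timestamp_last_tick - p_timestamp;
	u64 rebased = fake_sched_clock(0) - age;

	printf("age on src CPU: %llu ns, rebased timestamp: %llu ns\n",
	       age, rebased);
	return 0;
}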