[PATCH] sched: introduce need_resched accessor helpers

Add five static inline accessors to include/linux/sched.h (inside the
__KERNEL__ section): set_need_resched()/clear_need_resched() operate on
current, set_tsk_need_resched()/clear_tsk_need_resched() take an explicit
task, and need_resched() reads current->need_resched wrapped in unlikely().
Convert the open-coded `p->need_resched = 1` / `= 0` assignments in
kernel/sched.c to the new helpers. No behavior change intended.

NOTE(review): this patch was recovered from a copy whose newlines had been
collapsed. Line structure was reconstructed from the hunk headers
(-914,6 +914,31 and the seven -N,7 +N,7 hunks); blank context lines and
exact tab depth were inferred from the visible code — verify context lines
against the 2.4.19-pre7-ac2 tree before applying.

diff -urN linux-2.4.19-pre7-ac2/include/linux/sched.h linux/include/linux/sched.h
--- linux-2.4.19-pre7-ac2/include/linux/sched.h	Fri Apr 19 20:58:14 2002
+++ linux/include/linux/sched.h	Fri Apr 19 20:58:51 2002
@@ -914,6 +914,31 @@
 	return res;
 }
 
+static inline void set_need_resched(void)
+{
+	current->need_resched = 1;
+}
+
+static inline void clear_need_resched(void)
+{
+	current->need_resched = 0;
+}
+
+static inline void set_tsk_need_resched(struct task_struct *tsk)
+{
+	tsk->need_resched = 1;
+}
+
+static inline void clear_tsk_need_resched(struct task_struct *tsk)
+{
+	tsk->need_resched = 0;
+}
+
+static inline int need_resched(void)
+{
+	return unlikely(current->need_resched);
+}
+
 #endif /* __KERNEL__ */
 
 #endif
diff -urN linux-2.4.19-pre7-ac2/kernel/sched.c linux/kernel/sched.c
--- linux-2.4.19-pre7-ac2/kernel/sched.c	Fri Apr 19 20:58:14 2002
+++ linux/kernel/sched.c	Fri Apr 19 20:59:45 2002
@@ -253,7 +253,7 @@
 
 	need_resched = p->need_resched;
 	wmb();
-	p->need_resched = 1;
+	set_tsk_need_resched(p);
 	if (!need_resched && (p->cpu != smp_processor_id()))
 		smp_send_reschedule(p->cpu);
 }
@@ -612,7 +612,7 @@
 		this_rq->nr_running++;
 		enqueue_task(next, this_rq->active);
 		if (next->prio < current->prio)
-			current->need_resched = 1;
+			set_need_resched();
 		if (!idle && --imbalance) {
 			if (array == busiest->expired) {
 				array = busiest->active;
@@ -686,7 +686,7 @@
 
 	/* Task might have expired already, but not scheduled off yet */
 	if (p->array != rq->active) {
-		p->need_resched = 1;
+		set_tsk_need_resched(p);
 		return;
 	}
 	spin_lock(&rq->lock);
@@ -697,7 +697,7 @@
 	 */
 	if ((p->policy == SCHED_RR) && !--p->time_slice) {
 		p->time_slice = TASK_TIMESLICE(p);
-		p->need_resched = 1;
+		set_tsk_need_resched(p);
 
 		/* put it at the end of the queue: */
 		dequeue_task(p, rq->active);
@@ -717,7 +717,7 @@
 		p->sleep_avg--;
 	if (!--p->time_slice) {
 		dequeue_task(p, rq->active);
-		p->need_resched = 1;
+		set_tsk_need_resched(p);
 		p->prio = effective_prio(p);
 		p->time_slice = TASK_TIMESLICE(p);
 
@@ -797,7 +797,7 @@
 
 switch_tasks:
 	prefetch(next);
-	prev->need_resched = 0;
+	clear_tsk_need_resched(prev);
 
 	if (likely(prev != next)) {
 		rq->nr_switches++;
@@ -1464,7 +1464,7 @@
 	idle->state = TASK_RUNNING;
 	idle->cpu = cpu;
 	double_rq_unlock(idle_rq, rq);
-	idle->need_resched = 1;
+	set_tsk_need_resched(idle);
 	__restore_flags(flags);
 }
 