From: Con Kolivas

Much simpler.  Drop the per-task waker pointer and the task_preempts_curr()
helper that kept a task from preempting its own waker.  Wakeup preemption is
now a plain priority test, TASK_PREEMPTS_CURR(), and try_to_wake_up() only
reschedules the running task when the wakeup happens in interrupt context or
the woken task is a kernel thread (!p->mm).


 include/linux/sched.h |    1 -
 kernel/sched.c        |   38 +++++++++-----------------------------
 2 files changed, 9 insertions(+), 30 deletions(-)

diff -puN include/linux/sched.h~o16.2int include/linux/sched.h
--- 25/include/linux/sched.h~o16.2int	2003-08-23 13:57:44.000000000 -0700
+++ 25-akpm/include/linux/sched.h	2003-08-23 13:57:44.000000000 -0700
@@ -378,7 +378,6 @@ struct task_struct {
 	 */
 	struct task_struct *real_parent; /* real parent process (when being debugged) */
 	struct task_struct *parent;	/* parent process */
-	struct task_struct *waker;	/* waker process */
 	struct list_head children;	/* list of my children */
 	struct list_head sibling;	/* linkage in my parent's children list */
 	struct task_struct *group_leader;
diff -puN kernel/sched.c~o16.2int kernel/sched.c
--- 25/kernel/sched.c~o16.2int	2003-08-23 13:57:44.000000000 -0700
+++ 25-akpm/kernel/sched.c	2003-08-23 13:57:44.000000000 -0700
@@ -143,6 +143,9 @@
 #define VARYING_CREDIT(p) \
 	(!(HIGH_CREDIT(p) || LOW_CREDIT(p)))
 
+#define TASK_PREEMPTS_CURR(p, rq) \
+	((p)->prio < (rq)->curr->prio)
+
 /*
  * BASE_TIMESLICE scales user-nice values [ -20 ... 19 ]
  * to time slice values.
@@ -463,24 +466,15 @@ static inline void activate_task(task_t
 		 * of time they spend on the runqueue, waiting for execution
 		 * on a CPU, first time around:
 		 */
-		if (in_interrupt()){
+		if (in_interrupt())
 			p->activated = 2;
-			p->waker = p;
-		} else {
+		else
 			/*
 			 * Normal first-time wakeups get a credit too for on-runqueue
 			 * time, but it will be weighted down:
 			 */
 			p->activated = 1;
-			p->waker = current;
 		}
-	} else {
-		if (in_interrupt())
-			p->waker = p;
-		else
-			p->waker = current;
-	}
-
 	p->timestamp = now;
 
 	__activate_task(p, rq);
@@ -564,20 +558,6 @@ repeat:
 }
 #endif
 
-static inline int task_preempts_curr(task_t *p, runqueue_t *rq)
-{
-	if (p->prio < rq->curr->prio) {
-		/*
-		 * Prevent a task preempting it's own waker
-		 * to avoid starvation
-		 */
-		if (unlikely(rq->curr == p->waker))
-			return 0;
-		return 1;
-	}
-	return 0;
-}
-
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
@@ -629,8 +609,9 @@ repeat_lock_task:
 				__activate_task(p, rq);
 			else {
 				activate_task(p, rq);
-				if (task_preempts_curr(p, rq))
-					resched_task(rq->curr);
+				if (TASK_PREEMPTS_CURR(p, rq) &&
+					(in_interrupt() || !p->mm))
+						resched_task(rq->curr);
 			}
 			success = 1;
 		}
@@ -684,7 +665,6 @@ void wake_up_forked_process(task_t * p)
 	p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
 		CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
 
-	p->waker = p->parent;
 	p->interactive_credit = 0;
 
 	p->prio = effective_prio(p);
@@ -1131,7 +1111,7 @@ static inline void pull_task(runqueue_t
 	/*
 	 * Note that idle threads have a prio of MAX_PRIO, for this test
 	 * to be always true for them.
 	 */
-	if (task_preempts_curr(p, this_rq))
+	if (TASK_PREEMPTS_CURR(p, this_rq))
 		set_need_resched();
 }
_
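
For anyone reading the diff without the surrounding sched.c context, here is a
minimal userspace sketch of the new wakeup-preemption rule.  The toy structs,
the toy_in_interrupt flag and main() are illustrative stand-ins for the real
kernel types and in_interrupt(); only the condition inside wakeup_preempts()
mirrors what the patch installs in try_to_wake_up().

/*
 * Toy model of the o16.2 wakeup-preemption decision.  Lower prio value
 * means higher priority, as in the kernel; has_mm == false models a
 * kernel thread (!p->mm).
 */
#include <stdio.h>
#include <stdbool.h>

struct toy_task { int prio; bool has_mm; };
struct toy_rq   { struct toy_task *curr; };

static bool toy_in_interrupt;			/* stand-in for in_interrupt() */

#define TASK_PREEMPTS_CURR(p, rq) \
	((p)->prio < (rq)->curr->prio)

/* Should the freshly woken task p force a reschedule of rq->curr? */
static bool wakeup_preempts(struct toy_task *p, struct toy_rq *rq)
{
	return TASK_PREEMPTS_CURR(p, rq) &&
		(toy_in_interrupt || !p->has_mm);
}

int main(void)
{
	struct toy_task curr  = { .prio = 120, .has_mm = true };
	struct toy_task woken = { .prio = 110, .has_mm = true };
	struct toy_rq rq = { .curr = &curr };

	toy_in_interrupt = false;
	printf("user task, process-context wakeup: %d\n", wakeup_preempts(&woken, &rq));

	toy_in_interrupt = true;
	printf("user task, interrupt wakeup:       %d\n", wakeup_preempts(&woken, &rq));

	woken.has_mm = false;
	toy_in_interrupt = false;
	printf("kernel thread, process wakeup:     %d\n", wakeup_preempts(&woken, &rq));
	return 0;
}

The expected output is 0, 1, 1: a higher-priority user task woken from process
context no longer preempts the running task immediately, while interrupt-driven
wakeups and kernel threads still do.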