Patch from Mike Galbraith

The change to activate_task() prevents sleepers from gaining too much
advantage from long sleeps.  The change to deactivate_task() prevents
tasks with a high activation/deactivation rate from gaining permanent
maximum bonus points (irman's process load).  The change to
scheduler_tick() penalizes interactive tasks that consume their entire
timeslice, so that they are expired sooner and must re-earn their
elevated status.

This "cures" all of the starvation problems I've seen here.

 kernel/sched.c |    8 +++++++-
 1 files changed, 7 insertions(+), 1 deletion(-)

diff -puN kernel/sched.c~scheduler-starvation-fixes kernel/sched.c
--- 25/kernel/sched.c~scheduler-starvation-fixes	2003-03-15 00:05:48.000000000 -0800
+++ 25-akpm/kernel/sched.c	2003-03-15 00:05:48.000000000 -0800
@@ -75,7 +75,7 @@ int exit_weight = 3;
 int prio_bonus_ratio = 25;
 int interactive_delta = 2;
 int max_sleep_avg = 10 * HZ;
-int starvation_limit = 10 * HZ;
+int starvation_limit = 2 * (200 * HZ) / 1000;
 int node_threshold = 125;
 
 #define MIN_TIMESLICE		(min_timeslice)
@@ -370,6 +370,8 @@ static inline int activate_task(task_t *
 	 * spends sleeping, the higher the average gets - and the
 	 * higher the priority boost gets as well.
 	 */
+	if (sleep_time > MAX_TIMESLICE)
+		sleep_time = MAX_TIMESLICE;
 	sleep_avg = p->sleep_avg + sleep_time;
 
 	/*
@@ -411,6 +413,8 @@ static inline void deactivate_task(struc
 		rq->nr_uninterruptible++;
 	dequeue_task(p, p->array);
 	p->array = NULL;
+	if (p->sleep_avg)
+		p->sleep_avg--;
 }
 
 /*
@@ -1263,6 +1267,8 @@ void scheduler_tick(int user_ticks, int
 	if (!--p->time_slice) {
 		dequeue_task(p, rq->active);
 		set_tsk_need_resched(p);
+		if (TASK_INTERACTIVE(p) && p->sleep_avg > MIN_TIMESLICE)
+			p->sleep_avg -= MIN_TIMESLICE;
 		p->prio = effective_prio(p);
 		p->time_slice = task_timeslice(p);
 		p->first_time_slice = 0;
_
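
A note on the starvation_limit change (my arithmetic, not part of the
patch): 2 * (200 * HZ) / 1000 is 400ms expressed in jiffies,
independent of HZ up to integer rounding (400 jiffies at HZ=1000, 40
at HZ=100), whereas the old value of 10 * HZ was a full 10 seconds.
As I read EXPIRED_STARVING(), the expired array is now considered
starved after roughly 0.4 seconds scaled by nr_running instead of 10,
at which point interactive tasks stop being requeued onto the active
array at timeslice expiry.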
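
To make the activate_task() cap concrete, below is a minimal userspace
sketch of the bonus arithmetic.  The constants and the bonus() formula
are assumptions modelled on the 2.5 scheduler's effective_prio()
(MAX_USER_PRIO = 40, PRIO_BONUS_RATIO = 25, and an assumed
max_timeslice of 300ms), not a verbatim copy of any tree; it only
illustrates why capping sleep_time at MAX_TIMESLICE keeps a single
long sleep from buying the maximum priority boost in one step.

/* Userspace sketch, not kernel code.  Constants are assumptions
 * modelled on the 2.5 scheduler tunables and may differ from any
 * given tree. */
#include <stdio.h>

#define HZ		 1000
#define MAX_SLEEP_AVG	 (10 * HZ)		/* max_sleep_avg */
#define MAX_TIMESLICE	 (300 * HZ / 1000)	/* assumed max_timeslice */
#define MAX_USER_PRIO	 40
#define PRIO_BONUS_RATIO 25

/* Priority bonus in the style of effective_prio(): maps sleep_avg
 * in [0, MAX_SLEEP_AVG] onto roughly [-5, +5]. */
static int bonus(unsigned long sleep_avg)
{
	int b = (int)(MAX_USER_PRIO * PRIO_BONUS_RATIO * sleep_avg
		      / MAX_SLEEP_AVG / 100);
	return b - MAX_USER_PRIO * PRIO_BONUS_RATIO / 100 / 2;
}

int main(void)
{
	unsigned long sleep_time = 30 * HZ;	/* one 30 second sleep */
	unsigned long old_avg = sleep_time;	/* pre-patch: full credit */
	unsigned long new_avg = sleep_time > MAX_TIMESLICE
					? MAX_TIMESLICE : sleep_time;

	if (old_avg > MAX_SLEEP_AVG)		/* sleep_avg saturates */
		old_avg = MAX_SLEEP_AVG;

	printf("pre-patch:  sleep_avg=%5lu  bonus=%+d\n",
	       old_avg, bonus(old_avg));
	printf("post-patch: sleep_avg=%5lu  bonus=%+d\n",
	       new_avg, bonus(new_avg));
	return 0;
}

Compiled and run, the pre-patch case reaches sleep_avg = 10000 and the
full +5 bonus from a single 30 second sleep, while the post-patch case
credits only 300 jiffies and stays at -5; a task now has to sleep and
wake repeatedly (and, after the deactivate_task() and scheduler_tick()
changes, keep behaving interactively) to climb to and hold an elevated
priority.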