From: Nick Piggin John Hawkes explained the problem best: A large number of processes that are pinned to a single CPU results in every other CPU's load_balance() seeing this overloaded CPU as "busiest", yet move_tasks() never finds a task to pull-migrate. This condition occurs during module unload, but can also occur as a denial-of-service using sys_sched_setaffinity(). Several hundred CPUs performing this fruitless load_balance() will livelock on the busiest CPU's runqueue lock. A smaller number of CPUs will livelock if the pinned task count gets high. Expanding slightly on John's patch, this one attempts to work out whether the balancing failure has been due to too many tasks pinned on the runqueue. This allows it to be basically invisible to the regular balancing paths (ie. when there are no pinned tasks). We can use this extra knowledge to shut down the balancing faster, and ensure the migration threads don't start running which is another problem observed in the wild. Signed-off-by: Nick Piggin Acked-by: Ingo Molnar Signed-off-by: Andrew Morton --- 25-akpm/kernel/sched.c | 32 ++++++++++++++++++++++---------- 1 files changed, 22 insertions(+), 10 deletions(-) diff -puN kernel/sched.c~sched-improve-pinned-task-handling kernel/sched.c --- 25/kernel/sched.c~sched-improve-pinned-task-handling Thu Mar 10 15:45:47 2005 +++ 25-akpm/kernel/sched.c Thu Mar 10 15:46:31 2005 @@ -1633,7 +1633,7 @@ void pull_task(runqueue_t *src_rq, prio_ */ static inline int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, - struct sched_domain *sd, enum idle_type idle) + struct sched_domain *sd, enum idle_type idle, int *pinned) { /* * We do not migrate tasks that are: @@ -1643,8 +1643,10 @@ int can_migrate_task(task_t *p, runqueue */ if (task_running(rq, p)) return 0; - if (!cpu_isset(this_cpu, p->cpus_allowed)) + if (!cpu_isset(this_cpu, p->cpus_allowed)) { + (*pinned)++; return 0; + } /* * Aggressive migration if: @@ -1670,11 +1672,11 @@ int can_migrate_task(task_t *p, runqueue 
*/ static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest, unsigned long max_nr_move, struct sched_domain *sd, - enum idle_type idle) + enum idle_type idle, int *all_pinned) { prio_array_t *array, *dst_array; struct list_head *head, *curr; - int idx, pulled = 0; + int idx, pulled = 0, pinned = 0; task_t *tmp; if (max_nr_move <= 0 || busiest->nr_running <= 1) @@ -1718,7 +1720,7 @@ skip_queue: curr = curr->prev; - if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle)) { + if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) { if (curr != head) goto skip_queue; idx++; @@ -1741,6 +1743,10 @@ skip_queue: goto skip_bitmap; } out: + *all_pinned = 0; + if (unlikely(pinned >= max_nr_move) && pulled == 0) + *all_pinned = 1; + /* * Right now, this is the only place pull_task() is called, * so we can safely collect pull_task() stats here rather than @@ -1918,7 +1924,7 @@ static int load_balance(int this_cpu, ru struct sched_group *group; runqueue_t *busiest; unsigned long imbalance; - int nr_moved; + int nr_moved, all_pinned = 0; spin_lock(&this_rq->lock); schedstat_inc(sd, lb_cnt[idle]); @@ -1957,9 +1963,14 @@ static int load_balance(int this_cpu, ru */ double_lock_balance(this_rq, busiest); nr_moved = move_tasks(this_rq, this_cpu, busiest, - imbalance, sd, idle); + imbalance, sd, idle, + &all_pinned); spin_unlock(&busiest->lock); } + /* All tasks on this runqueue were pinned by CPU affinity */ + if (unlikely(all_pinned)) + goto out_balanced; + spin_unlock(&this_rq->lock); if (!nr_moved) { @@ -2026,7 +2037,7 @@ static int load_balance_newidle(int this struct sched_group *group; runqueue_t *busiest = NULL; unsigned long imbalance; - int nr_moved = 0; + int nr_moved = 0, all_pinned; schedstat_inc(sd, lb_cnt[NEWLY_IDLE]); group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE); @@ -2048,7 +2059,7 @@ static int load_balance_newidle(int this schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance); nr_moved = move_tasks(this_rq, this_cpu, 
busiest, - imbalance, sd, NEWLY_IDLE); + imbalance, sd, NEWLY_IDLE, &all_pinned); if (!nr_moved) schedstat_inc(sd, lb_failed[NEWLY_IDLE]); @@ -2107,6 +2118,7 @@ static void active_load_balance(runqueue cpu_group = sd->groups; do { for_each_cpu_mask(cpu, cpu_group->cpumask) { + int all_pinned; if (busiest_rq->nr_running <= 1) /* no more tasks left to move */ return; @@ -2127,7 +2139,7 @@ static void active_load_balance(runqueue /* move a task from busiest_rq to target_rq */ double_lock_balance(busiest_rq, target_rq); if (move_tasks(target_rq, cpu, busiest_rq, - 1, sd, SCHED_IDLE)) { + 1, sd, SCHED_IDLE, &all_pinned)) { schedstat_inc(sd, alb_pushed); } else { schedstat_inc(sd, alb_failed); _