From: Nick Piggin
From: Suresh Siddha

ifdef out SMT active balancing when CONFIG_SCHED_SMT is not set.

---

 25-akpm/kernel/sched.c |   10 ++++++++++
 1 files changed, 10 insertions(+)

diff -puN kernel/sched.c~sched-ifdef-active-balancing kernel/sched.c
--- 25/kernel/sched.c~sched-ifdef-active-balancing	2004-05-13 18:54:33.790150000 -0700
+++ 25-akpm/kernel/sched.c	2004-05-13 18:54:33.797148936 -0700
@@ -224,9 +224,11 @@ struct runqueue {
 #ifdef CONFIG_SMP
 	struct sched_domain *sd;
 
+#ifdef CONFIG_SCHED_SMT
 	/* For active balancing */
 	int active_balance;
 	int push_cpu;
+#endif
 
 	task_t *migration_thread;
 	struct list_head migration_queue;
@@ -1703,6 +1705,7 @@ static int load_balance(int this_cpu, ru
 
 	if (!nr_moved) {
 		sd->nr_balance_failed++;
+#ifdef CONFIG_SCHED_SMT
 
 		if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 			int wake = 0;
@@ -1722,6 +1725,7 @@ static int load_balance(int this_cpu, ru
 			 */
 			sd->nr_balance_failed = sd->cache_nice_tries;
 		}
+#endif
 	} else
 		sd->nr_balance_failed = 0;
 
@@ -1793,6 +1797,7 @@ static inline void idle_balance(int this
 	}
 }
 
+#ifdef CONFIG_SCHED_SMT
 /*
  * active_load_balance is run by migration threads. It pushes a running
  * task off the cpu. It can be required to correctly have at least 1 task
@@ -1850,6 +1855,7 @@ next_group:
 		group = group->next;
 	} while (group != sd->groups);
 }
+#endif
 
 /*
  * rebalance_tick will get called every timer tick, on every CPU.
@@ -3405,10 +3411,12 @@ static int migration_thread(void * data)
 			goto wait_to_die;
 		}
 
+#ifdef CONFIG_SCHED_SMT
 		if (rq->active_balance) {
 			active_load_balance(rq, cpu);
 			rq->active_balance = 0;
 		}
+#endif
 
 		head = &rq->migration_queue;
 
@@ -3913,8 +3921,10 @@ void __init sched_init(void)
 #ifdef CONFIG_SMP
 		rq->sd = &sched_domain_init;
 		rq->cpu_load = 0;
+#ifdef CONFIG_SCHED_SMT
 		rq->active_balance = 0;
 		rq->push_cpu = 0;
+#endif
 		rq->migration_thread = NULL;
 		INIT_LIST_HEAD(&rq->migration_queue);
 #endif
_
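
For reference, a standalone sketch of the pattern the patch applies: guard both the struct fields and every use of them with the same config symbol, otherwise the !CONFIG_SCHED_SMT build fails. This is not part of the patch; struct runqueue_demo, maybe_active_balance and the printf output are illustrative only, with only the CONFIG_SCHED_SMT symbol and the field names taken from the patch. Build with or without -DCONFIG_SCHED_SMT to see both configurations compile.

/* demo.c: gcc demo.c  -or-  gcc -DCONFIG_SCHED_SMT demo.c */
#include <stdio.h>

struct runqueue_demo {
	int nr_running;
#ifdef CONFIG_SCHED_SMT
	/* Only present when SMT scheduling is compiled in. */
	int active_balance;
	int push_cpu;
#endif
};

static void maybe_active_balance(struct runqueue_demo *rq)
{
#ifdef CONFIG_SCHED_SMT
	/* Users of the fields must be guarded as well. */
	if (rq->active_balance) {
		printf("pushing a task to cpu %d\n", rq->push_cpu);
		rq->active_balance = 0;
	}
#else
	(void)rq;	/* nothing to do without SMT active balancing */
#endif
}

int main(void)
{
	struct runqueue_demo rq = { .nr_running = 1 };

	maybe_active_balance(&rq);
	printf("sizeof(struct runqueue_demo) = %zu\n", sizeof(rq));
	return 0;
}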