diff -urN linux-2.4.19rc5aa1/include/asm-i386/processor.h linux/include/asm-i386/processor.h
--- linux-2.4.19rc5aa1/include/asm-i386/processor.h	Sat Aug 24 23:34:21 2002
+++ linux/include/asm-i386/processor.h	Sat Aug 24 23:36:52 2002
@@ -490,4 +490,6 @@
 
 #define cpu_relax()	rep_nop()
 
+#define ARCH_HAS_SMP_BALANCE
+
 #endif /* __ASM_I386_PROCESSOR_H */
diff -urN linux-2.4.19rc5aa1/include/asm-i386/smp_balance.h linux/include/asm-i386/smp_balance.h
--- linux-2.4.19rc5aa1/include/asm-i386/smp_balance.h	Wed Dec 31 19:00:00 1969
+++ linux/include/asm-i386/smp_balance.h	Sat Aug 24 23:44:49 2002
@@ -0,0 +1,48 @@
+#ifndef _ASM_SMP_BALANCE_H
+#define _ASM_SMP_BALANCE_H
+
+/*
+ * We have an architecture-specific SMP load balancer to improve
+ * scheduling behavior on hyperthreaded CPUs.  Since only P4s have
+ * HT, maybe this should be conditional on CONFIG_MPENTIUM4...
+ *
+ */
+
+/*
+ * Find any idle processor package (i.e. both virtual processors are idle)
+ */
+static inline int find_idle_package(int this_cpu)
+{
+	int i;
+
+	this_cpu = cpu_number_map(this_cpu);
+
+	for (i = (this_cpu + 1) % smp_num_cpus;
+	     i != this_cpu;
+	     i = (i + 1) % smp_num_cpus) {
+		int physical = cpu_logical_map(i);
+		int sibling = cpu_sibling_map[physical];
+
+		if (idle_cpu(physical) && idle_cpu(sibling))
+			return physical;
+	}
+	return -1;	/* not found */
+}
+
+static inline int arch_load_balance(int this_cpu, int idle)
+{
+	/* Special hack for hyperthreading */
+	if (unlikely(smp_num_siblings > 1 && idle && !idle_cpu(cpu_sibling_map[this_cpu]))) {
+		int found;
+		struct runqueue *rq_target;
+
+		if ((found = find_idle_package(this_cpu)) >= 0 ) {
+			rq_target = cpu_rq(found);
+			resched_task(rq_target->idle);
+			return 1;
+		}
+	}
+	return 0;
+}
+
+#endif /* _ASM_SMP_BALANCE_H */
diff -urN linux-2.4.19rc5aa1/include/linux/smp_balance.h linux/include/linux/smp_balance.h
--- linux-2.4.19rc5aa1/include/linux/smp_balance.h	Wed Dec 31 19:00:00 1969
+++ linux/include/linux/smp_balance.h	Sat Aug 24 23:36:52 2002
@@ -0,0 +1,14 @@
+#ifndef _LINUX_SMP_BALANCE_H
+#define _LINUX_SMP_BALANCE_H
+
+/*
+ * per-architecture load balancing logic, e.g. for hyperthreading
+ */
+
+#ifdef ARCH_HAS_SMP_BALANCE
+#include <asm/smp_balance.h>
+#else
+#define arch_load_balance(x, y) (0)
+#endif
+
+#endif /* _LINUX_SMP_BALANCE_H */
diff -urN linux-2.4.19rc5aa1/kernel/sched.c linux/kernel/sched.c
--- linux-2.4.19rc5aa1/kernel/sched.c	Sat Aug 24 23:34:19 2002
+++ linux/kernel/sched.c	Sat Aug 24 23:46:12 2002
@@ -484,28 +479,7 @@
 		set_need_resched();
 }
 
-#ifdef __i386__
-/*
- * Find any idle processor package (i.e. both virtual processors are idle)
- */
-static inline int find_idle_package(int this_cpu)
-{
-	int i;
-
-	this_cpu = cpu_number_map(this_cpu);
-
-	for (i = (this_cpu + 1) % smp_num_cpus;
-	     i != this_cpu;
-	     i = (i + 1) % smp_num_cpus) {
-		int physical = cpu_logical_map(i);
-		int sibling = cpu_sibling_map[physical];
-
-		if (idle_cpu(physical) && idle_cpu(sibling))
-			return physical;
-	}
-	return -1;	/* not found */
-}
-#endif
+#include <linux/smp_balance.h>
 
 /*
  * Current runqueue is empty, or rebalance tick: if there is an
@@ -524,19 +498,11 @@
 	prio_array_t *array;
 	list_t *head, *curr;
 
-#if defined(__i386__)
-	/* Special hack for hyperthreading */
-	if (unlikely(smp_num_siblings > 1) && idle && !idle_cpu(cpu_sibling_map[this_cpu])) {
-		int found;
-		runqueue_t *rq_target;
-
-		if ((found = find_idle_package(this_cpu)) >= 0 ) {
-			rq_target = cpu_rq(found);
-			resched_task(rq_target->idle);
-			return;
-		}
-	}
-#endif
+	/*
+	 * Handle architecture-specific balancing, such as hyperthreading.
+	 */
+	if (arch_load_balance(this_cpu, idle))
+		return;
 
 	/*
 	 * We search all runqueues to find the most busy one.