From: Anton Blanchard Restore the smt-enabled=off kernel command line functionality: - Remove the SMT_DYNAMIC state now that smt_snooze_delay allows for the same thing. - Remove the early prom.c parsing for the option, put it into an early_param instead. - In setup_cpu_maps honour the smt-enabled setting Note to Nathan: In order to allow cpu hotplug add of secondary threads after booting with smt-enabled=off, I had to initialise cpu_present_map to cpu_online_map in smp_cpus_done. I'm not sure how you want to handle this but it seems our present map currently does not allow cpus to be added into the partition that weren't there at boot (but were in the possible map). Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton --- 25-akpm/arch/ppc64/kernel/idle.c | 7 --- 25-akpm/arch/ppc64/kernel/prom.c | 70 +------------------------------------ 25-akpm/arch/ppc64/kernel/setup.c | 57 +++++++++++++++++++++++++++++- 25-akpm/arch/ppc64/kernel/smp.c | 10 +++++ 25-akpm/include/asm-ppc64/memory.h | 10 ----- 25-akpm/include/asm-ppc64/naca.h | 3 - 25-akpm/include/asm-ppc64/smp.h | 2 + 7 files changed, 71 insertions(+), 88 deletions(-) diff -puN arch/ppc64/kernel/idle.c~ppc64-restore-smt-enabled=off-kernel-command-line-option arch/ppc64/kernel/idle.c --- 25/arch/ppc64/kernel/idle.c~ppc64-restore-smt-enabled=off-kernel-command-line-option 2004-09-11 16:29:59.079572264 -0700 +++ 25-akpm/arch/ppc64/kernel/idle.c 2004-09-11 16:29:59.092570288 -0700 @@ -197,12 +197,7 @@ int dedicated_idle(void) HMT_very_low(); /* Low power mode */ - /* If the SMT mode is system controlled & the - * partner thread is doing work, switch into - * ST mode. - */ - if((naca->smt_state == SMT_DYNAMIC) && - (!(ppaca->lppaca.xIdle))) { + if (!(ppaca->lppaca.xIdle)) { /* Indicate we are no longer polling for * work, and then clear need_resched. 
If * need_resched was 1, set it back to 1 diff -puN arch/ppc64/kernel/prom.c~ppc64-restore-smt-enabled=off-kernel-command-line-option arch/ppc64/kernel/prom.c --- 25/arch/ppc64/kernel/prom.c~ppc64-restore-smt-enabled=off-kernel-command-line-option 2004-09-11 16:29:59.081571960 -0700 +++ 25-akpm/arch/ppc64/kernel/prom.c 2004-09-11 16:29:59.094569984 -0700 @@ -918,11 +918,7 @@ static void __init prom_hold_cpus(unsign = (void *)virt_to_abs(&__secondary_hold_acknowledge); unsigned long secondary_hold = virt_to_abs(*PTRRELOC((unsigned long *)__secondary_hold)); - struct systemcfg *_systemcfg = RELOC(systemcfg); struct prom_t *_prom = PTRRELOC(&prom); -#ifdef CONFIG_SMP - struct naca_struct *_naca = RELOC(naca); -#endif prom_debug("prom_hold_cpus: start...\n"); prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop); @@ -1003,18 +999,18 @@ static void __init prom_hold_cpus(unsign (*acknowledge == ((unsigned long)-1)); i++ ) ; if (*acknowledge == cpuid) { - prom_printf("... done\n"); + prom_printf(" done\n"); /* We have to get every CPU out of OF, * even if we never start it. */ if (cpuid >= NR_CPUS) goto next; } else { - prom_printf("... failed: %x\n", *acknowledge); + prom_printf(" failed: %x\n", *acknowledge); } } #ifdef CONFIG_SMP else - prom_printf("%x : booting cpu %s\n", cpuid, path); + prom_printf("%x : boot cpu %s\n", cpuid, path); #endif next: #ifdef CONFIG_SMP @@ -1023,13 +1019,6 @@ next: cpuid++; if (cpuid >= NR_CPUS) continue; - prom_printf("%x : preparing thread ... 
", - interrupt_server[i]); - if (_naca->smt_state) { - prom_printf("available\n"); - } else { - prom_printf("not available\n"); - } } #endif cpuid++; @@ -1068,57 +1057,6 @@ next: prom_debug("prom_hold_cpus: end...\n"); } -static void __init smt_setup(void) -{ - char *p, *q; - char my_smt_enabled = SMT_DYNAMIC; - ihandle prom_options = 0; - char option[9]; - unsigned long offset = reloc_offset(); - struct naca_struct *_naca = RELOC(naca); - char found = 0; - - if (strstr(RELOC(cmd_line), RELOC("smt-enabled="))) { - for (q = RELOC(cmd_line); (p = strstr(q, RELOC("smt-enabled="))) != 0; ) { - q = p + 12; - if (p > RELOC(cmd_line) && p[-1] != ' ') - continue; - found = 1; - if (q[0] == 'o' && q[1] == 'f' && - q[2] == 'f' && (q[3] == ' ' || q[3] == '\0')) { - my_smt_enabled = SMT_OFF; - } else if (q[0]=='o' && q[1] == 'n' && - (q[2] == ' ' || q[2] == '\0')) { - my_smt_enabled = SMT_ON; - } else { - my_smt_enabled = SMT_DYNAMIC; - } - } - } - if (!found) { - prom_options = call_prom("finddevice", 1, 1, ADDR("/options")); - if (prom_options != (ihandle) -1) { - prom_getprop(prom_options, "ibm,smt-enabled", - option, sizeof(option)); - if (option[0] != 0) { - found = 1; - if (!strcmp(option, RELOC("off"))) - my_smt_enabled = SMT_OFF; - else if (!strcmp(option, RELOC("on"))) - my_smt_enabled = SMT_ON; - else - my_smt_enabled = SMT_DYNAMIC; - } - } - } - - if (!found ) - my_smt_enabled = SMT_DYNAMIC; /* default to on */ - - _naca->smt_state = my_smt_enabled; -} - - #ifdef CONFIG_BOOTX_TEXT /* This function will enable the early boot text when doing OF booting. This @@ -1730,8 +1668,6 @@ prom_init(unsigned long r3, unsigned lon /* Initialize some system info into the Naca early... */ prom_initialize_naca(); - smt_setup(); - /* If we are on an SMP machine, then we *MUST* do the * following, regardless of whether we have an SMP * kernel or not. 
diff -puN arch/ppc64/kernel/setup.c~ppc64-restore-smt-enabled=off-kernel-command-line-option arch/ppc64/kernel/setup.c --- 25/arch/ppc64/kernel/setup.c~ppc64-restore-smt-enabled=off-kernel-command-line-option 2004-09-11 16:29:59.082571808 -0700 +++ 25-akpm/arch/ppc64/kernel/setup.c 2004-09-11 16:29:59.096569680 -0700 @@ -152,6 +152,50 @@ void __init disable_early_printk(void) } #if !defined(CONFIG_PPC_ISERIES) && defined(CONFIG_SMP) + +static int smt_enabled_cmdline; + +/* Look for ibm,smt-enabled OF option */ +static void check_smt_enabled(void) +{ + struct device_node *dn; + char *smt_option; + + /* Allow the command line to overrule the OF option */ + if (smt_enabled_cmdline) + return; + + dn = of_find_node_by_path("/options"); + + if (dn) { + smt_option = (char *)get_property(dn, "ibm,smt-enabled", NULL); + + if (smt_option) { + if (!strcmp(smt_option, "on")) + smt_enabled_at_boot = 1; + else if (!strcmp(smt_option, "off")) + smt_enabled_at_boot = 0; + } + } +} + +/* Look for smt-enabled= cmdline option */ +static int __init early_smt_enabled(char *p) +{ + smt_enabled_cmdline = 1; + + if (!p) + return 0; + + if (!strcmp(p, "on") || !strcmp(p, "1")) + smt_enabled_at_boot = 1; + else if (!strcmp(p, "off") || !strcmp(p, "0")) + smt_enabled_at_boot = 0; + + return 0; +} +early_param("smt-enabled", early_smt_enabled); + /** * setup_cpu_maps - initialize the following cpu maps: * cpu_possible_map @@ -174,6 +218,8 @@ static void __init setup_cpu_maps(void) struct device_node *dn = NULL; int cpu = 0; + check_smt_enabled(); + while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) { u32 *intserv; int j, len = sizeof(u32), nthreads; @@ -186,9 +232,16 @@ static void __init setup_cpu_maps(void) nthreads = len / sizeof(u32); for (j = 0; j < nthreads && cpu < NR_CPUS; j++) { + /* + * Only spin up secondary threads if SMT is enabled. + * We must leave space in the logical map for the + * threads. 
+ */ + if (j == 0 || smt_enabled_at_boot) { + cpu_set(cpu, cpu_present_map); + set_hard_smp_processor_id(cpu, intserv[j]); + } cpu_set(cpu, cpu_possible_map); - cpu_set(cpu, cpu_present_map); - set_hard_smp_processor_id(cpu, intserv[j]); cpu++; } } diff -puN arch/ppc64/kernel/smp.c~ppc64-restore-smt-enabled=off-kernel-command-line-option arch/ppc64/kernel/smp.c --- 25/arch/ppc64/kernel/smp.c~ppc64-restore-smt-enabled=off-kernel-command-line-option 2004-09-11 16:29:59.084571504 -0700 +++ 25-akpm/arch/ppc64/kernel/smp.c 2004-09-11 16:29:59.097569528 -0700 @@ -74,6 +74,8 @@ void smp_call_function_interrupt(void); extern long register_vpa(unsigned long flags, unsigned long proc, unsigned long vpa); +int smt_enabled_at_boot = 1; + /* Low level assembly function used to backup CPU 0 state */ extern void __save_cpu_setup(void); @@ -942,4 +944,12 @@ void __init smp_cpus_done(unsigned int m smp_threads_ready = 1; set_cpus_allowed(current, old_mask); + + /* + * We know at boot the maximum number of cpus we can add to + * a partition and set cpu_possible_map accordingly. cpu_present_map + * needs to match for the hotplug code to allow us to hot add + * any offline cpus. 
+ */ + cpu_present_map = cpu_possible_map; } diff -puN include/asm-ppc64/memory.h~ppc64-restore-smt-enabled=off-kernel-command-line-option include/asm-ppc64/memory.h --- 25/include/asm-ppc64/memory.h~ppc64-restore-smt-enabled=off-kernel-command-line-option 2004-09-11 16:29:59.086571200 -0700 +++ 25-akpm/include/asm-ppc64/memory.h 2004-09-11 16:29:59.097569528 -0700 @@ -56,14 +56,4 @@ static inline void isync(void) #define HMT_MEDIUM_HIGH "\tor 5,5,5 # medium high priority\n" #define HMT_HIGH "\tor 3,3,3 # high priority\n" -/* - * Various operational modes for SMT - * Off : never run threaded - * On : always run threaded - * Dynamic: Allow the system to switch modes as needed - */ -#define SMT_OFF 0 -#define SMT_ON 1 -#define SMT_DYNAMIC 2 - #endif diff -puN include/asm-ppc64/naca.h~ppc64-restore-smt-enabled=off-kernel-command-line-option include/asm-ppc64/naca.h --- 25/include/asm-ppc64/naca.h~ppc64-restore-smt-enabled=off-kernel-command-line-option 2004-09-11 16:29:59.087571048 -0700 +++ 25-akpm/include/asm-ppc64/naca.h 2004-09-11 16:29:59.097569528 -0700 @@ -37,9 +37,6 @@ struct naca_struct { u32 dCacheL1LinesPerPage; /* L1 d-cache lines / page 0x64 */ u32 iCacheL1LogLineSize; /* L1 i-cache line size Log2 0x68 */ u32 iCacheL1LinesPerPage; /* L1 i-cache lines / page 0x6c */ - u8 smt_state; /* 0 = SMT off 0x70 */ - /* 1 = SMT on */ - /* 2 = SMT dynamic */ u8 resv0[15]; /* Reserved 0x71 - 0x7F */ }; diff -puN include/asm-ppc64/smp.h~ppc64-restore-smt-enabled=off-kernel-command-line-option include/asm-ppc64/smp.h --- 25/include/asm-ppc64/smp.h~ppc64-restore-smt-enabled=off-kernel-command-line-option 2004-09-11 16:29:59.088570896 -0700 +++ 25-akpm/include/asm-ppc64/smp.h 2004-09-11 16:29:59.098569376 -0700 @@ -65,6 +65,8 @@ extern int query_cpu_stopped(unsigned in #define set_hard_smp_processor_id(CPU, VAL) \ do { (paca[(CPU)].hw_cpu_id = (VAL)); } while (0) +extern int smt_enabled_at_boot; + #endif /* __ASSEMBLY__ */ #endif /* !(_PPC64_SMP_H) */ _