From 951bc82c53f30ec6b4c2d04a051e74ea9a89b669 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Wed, 31 May 2006 01:24:02 -0700
Subject: [SPARC64]: Make smp_processor_id() functional before start_kernel()

Uses of smp_processor_id() get pushed earlier and earlier in the
start_kernel() sequence.  So just get it working before we call
start_kernel() to avoid all possible problems.

Signed-off-by: David S. Miller
---
 arch/sparc64/kernel/head.S  | 30 ++++++++++++++++++++++++++++++
 arch/sparc64/kernel/setup.c | 23 +++++++++++------------
 arch/sparc64/kernel/smp.c   | 16 +++-------------
 3 files changed, 44 insertions(+), 25 deletions(-)

diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 3eadac5e171e48..31c5892f5acc49 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -493,6 +494,35 @@ tlb_fixup_done:
 	call	prom_init
 	 mov	%l7, %o0		! OpenPROM cif handler
 
+	/* Initialize current_thread_info()->cpu as early as possible.
+	 * In order to do that accurately we have to patch up the get_cpuid()
+	 * assembler sequences.  And that, in turn, requires that we know
+	 * if we are on a Starfire box or not.  While we're here, patch up
+	 * the sun4v sequences as well.
+	 */
+	call	check_if_starfire
+	 nop
+	call	per_cpu_patch
+	 nop
+	call	sun4v_patch
+	 nop
+
+#ifdef CONFIG_SMP
+	call	hard_smp_processor_id
+	 nop
+	cmp	%o0, NR_CPUS
+	blu,pt	%xcc, 1f
+	 nop
+	call	boot_cpu_id_too_large
+	 nop
+	/* Not reached... */
+
+1:
+#else
+	mov	0, %o0
+#endif
+	stb	%o0, [%g6 + TI_CPU]
+
 	/* Off we go.... */
 	call	start_kernel
 	 nop
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 005167f82419b4..9cf1c88cd774ff 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -220,7 +220,7 @@ char reboot_command[COMMAND_LINE_SIZE];
 
 static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
 
-static void __init per_cpu_patch(void)
+void __init per_cpu_patch(void)
 {
 	struct cpuid_patch_entry *p;
 	unsigned long ver;
@@ -280,7 +280,7 @@ static void __init per_cpu_patch(void)
 	}
 }
 
-static void __init sun4v_patch(void)
+void __init sun4v_patch(void)
 {
 	struct sun4v_1insn_patch_entry *p1;
 	struct sun4v_2insn_patch_entry *p2;
@@ -315,6 +315,15 @@ static void __init sun4v_patch(void)
 	}
 }
 
+#ifdef CONFIG_SMP
+void __init boot_cpu_id_too_large(int cpu)
+{
+	prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
+		    cpu, NR_CPUS);
+	prom_halt();
+}
+#endif
+
 void __init setup_arch(char **cmdline_p)
 {
 	/* Initialize PROM console and command line. */
@@ -332,16 +341,6 @@ void __init setup_arch(char **cmdline_p)
 	conswitchp = &prom_con;
 #endif
 
-	/* Work out if we are starfire early on */
-	check_if_starfire();
-
-	/* Now we know enough to patch the get_cpuid sequences
-	 * used by trap code.
-	 */
-	per_cpu_patch();
-
-	sun4v_patch();
-
 	boot_flags_init(*cmdline_p);
 
 	idprom_init();
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 90eaca3ec9a628..4e8cd79156e0e0 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1264,7 +1264,6 @@ void __init smp_tick_init(void)
 	boot_cpu_id = hard_smp_processor_id();
 	current_tick_offset = timer_tick_offset;
 
-	cpu_set(boot_cpu_id, cpu_online_map);
 	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
 }
 
@@ -1345,18 +1344,6 @@ void __init smp_setup_cpu_possible_map(void)
 void __devinit smp_prepare_boot_cpu(void)
 {
-	int cpu = hard_smp_processor_id();
-
-	if (cpu >= NR_CPUS) {
-		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
-		prom_halt();
-	}
-
-	current_thread_info()->cpu = cpu;
-	__local_per_cpu_offset = __per_cpu_offset(cpu);
-
-	cpu_set(smp_processor_id(), cpu_online_map);
-	cpu_set(smp_processor_id(), phys_cpu_present_map);
 }
 
 int __devinit __cpu_up(unsigned int cpu)
 {
@@ -1433,4 +1420,7 @@ void __init setup_per_cpu_areas(void)
 
 	for (i = 0; i < NR_CPUS; i++, ptr += size)
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+
+	/* Setup %g5 for the boot cpu.  */
+	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
 }
--
cgit 1.2.3-korg
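The head.S change above boils down to one ordering guarantee: current_thread_info()->cpu is valid before start_kernel() runs. The sketch below is a minimal, illustration-only C model of that ordering; every function and type in it (thread_info, current_thread_info(), hard_smp_processor_id(), start_kernel()) is a simplified stand-in for the real sparc64 assembler and kernel infrastructure, not the kernel's API.

	/*
	 * Minimal, freestanding model of the boot ordering this patch enforces.
	 * Everything here is a simplified stand-in, not kernel code.
	 */
	#include <stdio.h>

	#define NR_CPUS 64

	struct thread_info {
		int cpu;			/* what smp_processor_id() reads */
	};

	static struct thread_info boot_thread_info;

	/* Stand-in for current_thread_info(), normally reached via %g6 on sparc64. */
	static struct thread_info *current_thread_info(void)
	{
		return &boot_thread_info;
	}

	/* smp_processor_id() ultimately reduces to current_thread_info()->cpu. */
	static int smp_processor_id(void)
	{
		return current_thread_info()->cpu;
	}

	/* Stand-in for the firmware query; pretend the boot cpu has id 2. */
	static int hard_smp_processor_id(void)
	{
		return 2;
	}

	static void start_kernel(void)
	{
		/* Early start_kernel() users of smp_processor_id() now see a valid id. */
		printf("start_kernel() on cpu %d\n", smp_processor_id());
	}

	int main(void)
	{
		int cpu = hard_smp_processor_id();

		/* Mirrors the new head.S check that calls boot_cpu_id_too_large(). */
		if (cpu >= NR_CPUS) {
			fprintf(stderr, "boot cpu id (%d) >= NR_CPUS (%d)\n", cpu, NR_CPUS);
			return 1;
		}

		/* The head.S "stb %o0, [%g6 + TI_CPU]" step, done before start_kernel(). */
		current_thread_info()->cpu = cpu;

		start_kernel();
		return 0;
	}

The real smp_processor_id() on sparc64 reaches thread_info through register %g6 and the patched get_cpuid() sequences; the model collapses that to a global, but the ordering it demonstrates is the point of the patch.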
From 0b0968a3e691771bf87e1ce747b2c7d23b5526c8 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Thu, 1 Jun 2006 17:47:25 -0700
Subject: [SPARC64]: Fix D-cache corruption in mremap

If we move a mapping from one virtual address to another, and this
changes the virtual color of the mapping to those pages, we can see
corrupt data due to D-cache aliasing.

Check for and deal with this by overriding the move_pte() macro.
Set things up so that other platforms can cleanly override the
move_pte() macro too.

Signed-off-by: David S. Miller
---
 include/asm-generic/pgtable.h | 11 +----------
 include/asm-mips/pgtable.h    | 10 +++++++++-
 include/asm-sparc64/pgtable.h | 17 +++++++++++++++++
 3 files changed, 27 insertions(+), 11 deletions(-)

diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 358e4d309ceb17..c2059a3a06216c 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -159,17 +159,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define lazy_mmu_prot_update(pte)	do { } while (0)
 #endif
 
-#ifndef __HAVE_ARCH_MULTIPLE_ZERO_PAGE
+#ifndef __HAVE_ARCH_MOVE_PTE
 #define move_pte(pte, prot, old_addr, new_addr)	(pte)
-#else
-#define move_pte(pte, prot, old_addr, new_addr)		\
-({							\
-	pte_t newpte = (pte);				\
-	if (pte_present(pte) && pfn_valid(pte_pfn(pte)) &&	\
-			pte_page(pte) == ZERO_PAGE(old_addr))	\
-		newpte = mk_pte(ZERO_PAGE(new_addr), (prot));	\
-	newpte;						\
-})
 #endif
 
 /*
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index 702a28fa7a3489..69cebbd9f3e019 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -70,7 +70,15 @@ extern unsigned long zero_page_mask;
 #define ZERO_PAGE(vaddr) \
 	(virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
 
-#define __HAVE_ARCH_MULTIPLE_ZERO_PAGE
+#define __HAVE_ARCH_MOVE_PTE
+#define move_pte(pte, prot, old_addr, new_addr)		\
+({							\
+	pte_t newpte = (pte);				\
+	if (pte_present(pte) && pfn_valid(pte_pfn(pte)) &&	\
+			pte_page(pte) == ZERO_PAGE(old_addr))	\
+		newpte = mk_pte(ZERO_PAGE(new_addr), (prot));	\
+	newpte;						\
+})
 
 extern void paging_init(void);
 
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index c44e7466534e9d..cd464f469a2c2b 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -689,6 +689,23 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p
 #define pte_clear(mm,addr,ptep)		\
 	set_pte_at((mm), (addr), (ptep), __pte(0UL))
 
+#ifdef DCACHE_ALIASING_POSSIBLE
+#define __HAVE_ARCH_MOVE_PTE
+#define move_pte(pte, prot, old_addr, new_addr)				\
+({									\
+	pte_t newpte = (pte);						\
+	if (tlb_type != hypervisor && pte_present(pte)) {		\
+		unsigned long this_pfn = pte_pfn(pte);			\
+									\
+		if (pfn_valid(this_pfn) &&				\
+		    (((old_addr) ^ (new_addr)) & (1 << 13)))		\
+			flush_dcache_page_all(current->mm,		\
+					      pfn_to_page(this_pfn));	\
+	}								\
+	newpte;								\
+})
+#endif
+
 extern pgd_t swapper_pg_dir[2048];
 extern pmd_t swapper_low_pmd_dir[2048];
 
--
cgit 1.2.3-korg
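The sparc64 move_pte() added above keys off bit 13 of the virtual address: with 8K pages and a virtually indexed D-cache, that bit selects the cache color, so a remap that flips it leaves a stale alias behind. The sketch below is an illustration only, with plain-C stand-ins rather than kernel calls (the stub replaces flush_dcache_page_all()); it just demonstrates the (((old_addr) ^ (new_addr)) & (1 << 13)) test from the patch.

	/*
	 * Illustration of the D-cache color test used by the sparc64 move_pte()
	 * above.  The constants mirror the patch (8K pages, bit 13 selects the
	 * color); the flush is a stub, not the kernel's flush_dcache_page_all().
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SHIFT	13			/* 8K pages on sparc64 */
	#define COLOR_BIT	(1UL << PAGE_SHIFT)	/* the "(1 << 13)" in move_pte() */

	/* True when the old and new virtual addresses land in different colors. */
	static bool changes_dcache_color(unsigned long old_addr, unsigned long new_addr)
	{
		return ((old_addr ^ new_addr) & COLOR_BIT) != 0;
	}

	/* Stub standing in for flushing the old alias out of the D-cache. */
	static void flush_old_alias(unsigned long old_addr)
	{
		printf("flush D-cache alias for page previously mapped at %#lx\n", old_addr);
	}

	int main(void)
	{
		/* Same color: bit 13 identical, no flush needed. */
		unsigned long a_old = 0x100000, a_new = 0x104000;
		/* Different color: bit 13 flips, the old alias must be flushed. */
		unsigned long b_old = 0x100000, b_new = 0x102000;

		if (changes_dcache_color(a_old, a_new))
			flush_old_alias(a_old);
		if (changes_dcache_color(b_old, b_new))
			flush_old_alias(b_old);

		return 0;
	}

Only the second pair triggers the flush, which is exactly the case the patch guards against: a page whose virtual color changed across the mremap.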