diff -urNp 2.4.19pre8/arch/i386/kernel/process.c z/arch/i386/kernel/process.c
--- 2.4.19pre8/arch/i386/kernel/process.c	Wed May 15 21:51:27 2002
+++ z/arch/i386/kernel/process.c	Fri May 24 03:00:30 2002
@@ -311,7 +311,7 @@ void machine_real_restart(unsigned char
 	/*
 	 * Use `swapper_pg_dir' as our page directory.
 	 */
-	asm volatile("movl %0,%%cr3": :"r" (__pa(swapper_pg_dir)));
+	load_cr3(swapper_pg_dir);
 
 	/* Write 0x1234 to absolute memory location 0x472.  The BIOS reads
 	   this on booting to tell it to "Bypass memory test (also warm
diff -urNp 2.4.19pre8/arch/i386/kernel/smp.c z/arch/i386/kernel/smp.c
--- 2.4.19pre8/arch/i386/kernel/smp.c	Wed May 15 21:51:27 2002
+++ z/arch/i386/kernel/smp.c	Fri May 24 03:01:01 2002
@@ -298,11 +298,15 @@ static spinlock_t tlbstate_lock = SPIN_L
 /*
  * We cannot call mmdrop() because we are in interrupt context,
  * instead update mm->cpu_vm_mask.
+ *
+ * We need to reload %cr3 since the page tables may be going
+ * away from under us..
  */
 static void inline leave_mm (unsigned long cpu)
 {
 	BUG_ON(cpu_tlbstate[cpu].state == TLBSTATE_OK);
 	clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
+	load_cr3(swapper_pg_dir);
 }
 
 /*
diff -urNp 2.4.19pre8/arch/i386/mm/init.c z/arch/i386/mm/init.c
--- 2.4.19pre8/arch/i386/mm/init.c	Fri May 3 02:11:58 2002
+++ z/arch/i386/mm/init.c	Fri May 24 03:00:30 2002
@@ -331,7 +331,7 @@ void __init paging_init(void)
 {
 	pagetable_init();
 
-	__asm__( "movl %%ecx,%%cr3\n" ::"c"(__pa(swapper_pg_dir)));
+	load_cr3(swapper_pg_dir);
 
 #if CONFIG_X86_PAE
 	/*
diff -urNp 2.4.19pre8/include/asm-i386/mmu_context.h z/include/asm-i386/mmu_context.h
--- 2.4.19pre8/include/asm-i386/mmu_context.h	Wed May 15 22:50:42 2002
+++ z/include/asm-i386/mmu_context.h	Fri May 24 03:00:30 2002
@@ -42,7 +42,7 @@ static inline void switch_mm(struct mm_s
 		set_bit(cpu, &next->cpu_vm_mask);
 		set_bit(cpu, &next->context.cpuvalid);
 		/* Re-load page tables */
-		asm volatile("movl %0,%%cr3": :"r" (__pa(next->pgd)));
+		load_cr3(next->pgd);
 	}
 #ifdef CONFIG_SMP
 	else {
@@ -51,9 +51,9 @@ static inline void switch_mm(struct mm_s
 			out_of_line_bug();
 		if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
 			/* We were in lazy tlb mode and leave_mm disabled
-			 * tlb flush IPI delivery. We must flush our tlb.
+			 * tlb flush IPI delivery. We must reload %cr3.
 			 */
-			local_flush_tlb();
+			load_cr3(next->pgd);
 		}
 		if (!test_and_set_bit(cpu, &next->context.cpuvalid))
 			load_LDT(next);
diff -urNp 2.4.19pre8/include/asm-i386/pgalloc.h z/include/asm-i386/pgalloc.h
--- 2.4.19pre8/include/asm-i386/pgalloc.h	Wed May 15 21:51:32 2002
+++ z/include/asm-i386/pgalloc.h	Fri May 24 03:00:30 2002
@@ -139,7 +139,7 @@ static __inline__ void pte_free_slow(pte
 	free_page((unsigned long)pte);
 }
 
-#define pte_free(pte)		pte_free_slow(pte)
+#define pte_free(pte)		pte_free_fast(pte)
 #define pgd_free(pgd)		free_pgd_slow(pgd)
 #define pgd_alloc(mm)		get_pgd_fast()
 
@@ -227,13 +227,12 @@ struct tlb_state
 };
 extern struct tlb_state cpu_tlbstate[NR_CPUS];
 
-
-#endif
+#endif /* CONFIG_SMP */
 
 static inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
 {
-	/* i386 does not keep any page table caches in TLB */
+	flush_tlb_mm(mm);
 }
 
 #endif /* _I386_PGALLOC_H */
diff -urNp 2.4.19pre8/include/asm-i386/processor.h z/include/asm-i386/processor.h
--- 2.4.19pre8/include/asm-i386/processor.h	Fri May 3 20:23:55 2002
+++ z/include/asm-i386/processor.h	Fri May 24 03:00:30 2002
@@ -191,6 +191,9 @@ static inline unsigned int cpuid_edx(uns
 #define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
 #define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
 
+#define load_cr3(pgdir) \
+	asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)));
+
 /*
  * Save the cr4 feature set we're using (ie
  * Pentium 4MB enable and PPro Global page
diff -urNp 2.4.19pre8/mm/memory.c z/mm/memory.c
--- 2.4.19pre8/mm/memory.c	Wed May 15 21:51:32 2002
+++ z/mm/memory.c	Fri May 24 03:00:30 2002
@@ -146,6 +146,7 @@ int check_pgt_cache(void)
 void clear_page_tables(struct mm_struct *mm, unsigned long first, int nr)
 {
 	pgd_t * page_dir = mm->pgd;
+	unsigned long last = first + nr;
 
 	spin_lock(&mm->page_table_lock);
 	page_dir += first;
@@ -155,6 +156,8 @@ void clear_page_tables(struct mm_struct
 	} while (--nr);
 	spin_unlock(&mm->page_table_lock);
 
+	flush_tlb_pgtables(mm, first * PGDIR_SIZE, last * PGDIR_SIZE);
+
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
 }
diff -urNp 2.4.19pre8/mm/mmap.c z/mm/mmap.c
--- 2.4.19pre8/mm/mmap.c	Wed May 15 21:51:32 2002
+++ z/mm/mmap.c	Fri May 24 03:00:30 2002
@@ -908,7 +908,6 @@ no_mmaps:
 	end_index = pgd_index(last);
 	if (end_index > start_index) {
 		clear_page_tables(mm, start_index, end_index - start_index);
-		flush_tlb_pgtables(mm, first & PGDIR_MASK, last & PGDIR_MASK);
 	}
 }