From: Anton Blanchard We weren't masking the lower bits of the VA in a tlbie(l) instruction. While most CPUs ignore this we should play it safe and follow the spec. Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton --- 25-akpm/include/asm-ppc64/mmu.h | 8 ++++++-- 1 files changed, 6 insertions(+), 2 deletions(-) diff -puN include/asm-ppc64/mmu.h~ppc64-mask-lower-bits-in-tlbie include/asm-ppc64/mmu.h --- 25/include/asm-ppc64/mmu.h~ppc64-mask-lower-bits-in-tlbie 2005-01-25 21:38:49.083183592 -0800 +++ 25-akpm/include/asm-ppc64/mmu.h 2005-01-25 21:38:49.087182984 -0800 @@ -122,10 +122,13 @@ static inline void __tlbie(unsigned long /* clear top 16 bits, non SLS segment */ va &= ~(0xffffULL << 48); - if (large) + if (large) { + va &= HPAGE_MASK; asm volatile("tlbie %0,1" : : "r"(va) : "memory"); - else + } else { + va &= PAGE_MASK; asm volatile("tlbie %0,0" : : "r"(va) : "memory"); + } } static inline void tlbie(unsigned long va, int large) @@ -139,6 +142,7 @@ static inline void __tlbiel(unsigned lon { /* clear top 16 bits, non SLS segment */ va &= ~(0xffffULL << 48); + va &= PAGE_MASK; /* * Thanks to Alan Modra we are now able to use machine specific _