From: Hugh Dickins

Nick Piggin's patch to fold away most of the pud and pmd levels when not
required.  Adjusted to define minimal pud_addr_end (in the 4LEVEL_HACK case
too) and pmd_addr_end.  Responsible for half of the savings.

Signed-off-by: Hugh Dickins
Signed-off-by: Andrew Morton
---

 25-akpm/include/asm-generic/4level-fixup.h  |    4 +++
 25-akpm/include/asm-generic/pgtable-nopmd.h |    5 ++++
 25-akpm/include/asm-generic/pgtable-nopud.h |    5 ++++
 25-akpm/include/asm-generic/pgtable.h       |    4 +++
 25-akpm/mm/memory.c                         |   34 ++++++----------------------
 5 files changed, 26 insertions(+), 26 deletions(-)

diff -puN include/asm-generic/4level-fixup.h~ptwalk-pud-and-pmd-folded include/asm-generic/4level-fixup.h
--- 25/include/asm-generic/4level-fixup.h~ptwalk-pud-and-pmd-folded	2005-03-09 16:34:11.000000000 -0800
+++ 25-akpm/include/asm-generic/4level-fixup.h	2005-03-09 16:34:11.000000000 -0800
@@ -2,6 +2,7 @@
 #define _4LEVEL_FIXUP_H
 
 #define __ARCH_HAS_4LEVEL_HACK
+#define __PAGETABLE_PUD_FOLDED
 
 #define PUD_SIZE		PGDIR_SIZE
 #define PUD_MASK		PGDIR_MASK
@@ -31,4 +32,7 @@
 #define pud_free(x)			do { } while (0)
 #define __pud_free_tlb(tlb, x)		do { } while (0)
 
+#undef pud_addr_end
+#define pud_addr_end(addr, end)		(end)
+
 #endif
diff -puN include/asm-generic/pgtable.h~ptwalk-pud-and-pmd-folded include/asm-generic/pgtable.h
--- 25/include/asm-generic/pgtable.h~ptwalk-pud-and-pmd-folded	2005-03-09 16:34:11.000000000 -0800
+++ 25-akpm/include/asm-generic/pgtable.h	2005-03-09 16:34:11.000000000 -0800
@@ -146,15 +146,19 @@ static inline void ptep_set_wrprotect(st
 	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
 })
 
+#ifndef pud_addr_end
 #define pud_addr_end(addr, end)						\
 ({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
 	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
 })
+#endif
 
+#ifndef pmd_addr_end
 #define pmd_addr_end(addr, end)						\
 ({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
 	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
 })
+#endif
 
 #ifndef __ASSEMBLY__
 /*
diff -puN include/asm-generic/pgtable-nopmd.h~ptwalk-pud-and-pmd-folded include/asm-generic/pgtable-nopmd.h
--- 25/include/asm-generic/pgtable-nopmd.h~ptwalk-pud-and-pmd-folded	2005-03-09 16:34:11.000000000 -0800
+++ 25-akpm/include/asm-generic/pgtable-nopmd.h	2005-03-09 16:34:11.000000000 -0800
@@ -5,6 +5,8 @@
 
 #include <asm-generic/pgtable-nopud.h>
 
+#define __PAGETABLE_PMD_FOLDED
+
 /*
  * Having the pmd type consist of a pud gets the size right, and allows
  * us to conceptually access the pud entry that this pmd is folded into
@@ -55,6 +57,9 @@ static inline pmd_t * pmd_offset(pud_t *
 #define pmd_free(x)			do { } while (0)
 #define __pmd_free_tlb(tlb, x)		do { } while (0)
 
+#undef pmd_addr_end
+#define pmd_addr_end(addr, end)		(end)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _PGTABLE_NOPMD_H */
diff -puN include/asm-generic/pgtable-nopud.h~ptwalk-pud-and-pmd-folded include/asm-generic/pgtable-nopud.h
--- 25/include/asm-generic/pgtable-nopud.h~ptwalk-pud-and-pmd-folded	2005-03-09 16:34:11.000000000 -0800
+++ 25-akpm/include/asm-generic/pgtable-nopud.h	2005-03-09 16:34:11.000000000 -0800
@@ -3,6 +3,8 @@
 
 #ifndef __ASSEMBLY__
 
+#define __PAGETABLE_PUD_FOLDED
+
 /*
  * Having the pud type consist of a pgd gets the size right, and allows
  * us to conceptually access the pgd entry that this pud is folded into
@@ -52,5 +54,8 @@ static inline pud_t * pud_offset(pgd_t *
 #define pud_free(x)			do { } while (0)
 #define __pud_free_tlb(tlb, x)		do { } while (0)
 
+#undef pud_addr_end
+#define pud_addr_end(addr, end)		(end)
+
 #endif /* __ASSEMBLY__ */
 #endif /* _PGTABLE_NOPUD_H */
diff -puN mm/memory.c~ptwalk-pud-and-pmd-folded mm/memory.c
--- 25/mm/memory.c~ptwalk-pud-and-pmd-folded	2005-03-09 16:34:11.000000000 -0800
+++ 25-akpm/mm/memory.c	2005-03-09 16:34:11.000000000 -0800
@@ -1973,15 +1973,12 @@ int handle_mm_fault(struct mm_struct *mm
 	return VM_FAULT_OOM;
 }
 
-#ifndef __ARCH_HAS_4LEVEL_HACK
+#ifndef __PAGETABLE_PUD_FOLDED
 /*
  * Allocate page upper directory.
  *
  * We've already handled the fast-path in-line, and we own the
  * page table lock.
- *
- * On a two-level or three-level page table, this ends up actually being
- * entirely optimized away.
  */
 pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 {
@@ -2005,15 +2002,14 @@ pud_t fastcall *__pud_alloc(struct mm_st
  out:
 	return pud_offset(pgd, address);
 }
+#endif /* __PAGETABLE_PUD_FOLDED */
 
+#ifndef __PAGETABLE_PMD_FOLDED
 /*
  * Allocate page middle directory.
  *
  * We've already handled the fast-path in-line, and we own the
  * page table lock.
- *
- * On a two-level page table, this ends up actually being entirely
- * optimized away.
  */
 pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 {
@@ -2029,38 +2025,24 @@ pmd_t fastcall *__pmd_alloc(struct mm_st
 	 * Because we dropped the lock, we should re-check the
 	 * entry, as somebody else could have populated it..
 	 */
+#ifndef __ARCH_HAS_4LEVEL_HACK
 	if (pud_present(*pud)) {
 		pmd_free(new);
 		goto out;
 	}
 	pud_populate(mm, pud, new);
- out:
-	return pmd_offset(pud, address);
-}
 #else
-pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
-{
-	pmd_t *new;
-
-	spin_unlock(&mm->page_table_lock);
-	new = pmd_alloc_one(mm, address);
-	spin_lock(&mm->page_table_lock);
-	if (!new)
-		return NULL;
-
-	/*
-	 * Because we dropped the lock, we should re-check the
-	 * entry, as somebody else could have populated it..
-	 */
 	if (pgd_present(*pud)) {
 		pmd_free(new);
 		goto out;
 	}
 	pgd_populate(mm, pud, new);
-out:
+#endif /* __ARCH_HAS_4LEVEL_HACK */
+
+ out:
 	return pmd_offset(pud, address);
 }
-#endif
+#endif /* __PAGETABLE_PMD_FOLDED */
 
 int make_pages_present(unsigned long addr, unsigned long end)
 {
_
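
[Editor's illustration, not part of the patch.]  The savings come from the
generic pagetable walkers, which step each level with p?d_addr_end().  Below
is a minimal userspace sketch of that pattern; the PMD_* values and the
walk_pud_range()/walk_pmd_range() helpers are invented stand-ins, and the
({ ... }) statement-expression macro needs GNU C.  With the folded definition
pud_addr_end(addr, end) == (end), the pud loop's first step lands exactly on
end, so its body runs once and the compiler can optimize the level away.

/*
 * Illustration only -- a userspace model of the walker pattern,
 * not kernel code.  Build with gcc.
 */
#include <stdio.h>

#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

/* Folded pud level, as pgtable-nopud.h now defines it: */
#define pud_addr_end(addr, end)	(end)

/*
 * Unfolded pmd level, as in the generic pgtable.h definition.  The
 * "- 1" on both sides keeps the comparison right even when the
 * rounded-up __boundary wraps to 0 at the top of the address space.
 */
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

static void walk_pmd_range(unsigned long addr, unsigned long end)
{
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);
		printf("  pmd span %#lx..%#lx\n", addr, next);
		addr = next;
	} while (addr != end);
}

static void walk_pud_range(unsigned long addr, unsigned long end)
{
	unsigned long next;

	do {
		/* folded: next == end, so this loop runs exactly once */
		next = pud_addr_end(addr, end);
		walk_pmd_range(addr, next);
		addr = next;
	} while (addr != end);
}

int main(void)
{
	walk_pud_range(0x100000UL, 0x500000UL);
	return 0;
}

Run over 0x100000..0x500000 this prints three pmd spans but makes only one
pass through the pud loop; with the level folded, the compiler can collapse
walk_pud_range() to a straight call into the pmd walk.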
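
[Editor's illustration, not part of the patch.]  The comment kept in
__pmd_alloc() ("Because we dropped the lock, we should re-check the entry,
as somebody else could have populated it..") describes the pattern both
allocators share: page_table_lock is dropped across the possibly-sleeping
allocation, so the slot must be re-checked once the lock is retaken, and the
loser of the race frees its copy.  A userspace analogue, where a pthreads
mutex stands in for mm->page_table_lock, malloc() for pmd_alloc_one(), and
entry/entry_alloc() are invented names:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *entry;			/* stands in for the pud/pmd slot */

/* Caller holds table_lock, as handle_mm_fault holds page_table_lock. */
static void *entry_alloc(void)
{
	void *new;

	pthread_mutex_unlock(&table_lock);	/* allocation may sleep */
	new = malloc(64);
	pthread_mutex_lock(&table_lock);
	if (!new)
		return NULL;

	/* Re-check: somebody else may have populated the slot. */
	if (entry) {
		free(new);			/* we lost the race */
		return entry;
	}
	entry = new;
	return entry;
}

int main(void)
{
	pthread_mutex_lock(&table_lock);
	void *e = entry_alloc();
	pthread_mutex_unlock(&table_lock);
	return e ? 0 : 1;
}

Note how the patch also unifies the two copies of this logic in __pmd_alloc()
behind a single " out:" label, leaving only the present/populate calls to
differ under __ARCH_HAS_4LEVEL_HACK.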