From: Jeff Dike

This patch adds stub pml4 support to UML: direct pgd_offset()/pgd_offset_k()
lookups are converted to the pml4_offset()/pml4_pgd_offset() calling
convention, pgd_alloc() becomes __pgd_alloc(), and the generic pml4 stub
headers are pulled into page.h, pgalloc.h, and pgtable.h.

Signed-off-by: Jeff Dike
Signed-off-by: Andrew Morton
---

 25-akpm/arch/um/kernel/mem.c          |   14 ++++++++------
 25-akpm/arch/um/kernel/process_kern.c |    2 +-
 25-akpm/arch/um/kernel/skas/tlb.c     |    4 ++--
 25-akpm/arch/um/kernel/tlb.c          |    6 +++---
 25-akpm/arch/um/kernel/trap_kern.c    |    2 +-
 25-akpm/arch/um/kernel/tt/tlb.c       |    6 +++---
 25-akpm/include/asm-um/page.h         |    2 ++
 25-akpm/include/asm-um/pgalloc.h      |    7 ++++---
 25-akpm/include/asm-um/pgtable.h      |   18 ++++--------------
 9 files changed, 28 insertions(+), 33 deletions(-)

diff -puN arch/um/kernel/mem.c~uml-pml4-support arch/um/kernel/mem.c
--- 25/arch/um/kernel/mem.c~uml-pml4-support	2004-11-15 20:00:54.908450992 -0800
+++ 25-akpm/arch/um/kernel/mem.c	2004-11-15 20:00:54.923448712 -0800
@@ -140,7 +140,8 @@ pte_t *kmap_pte;
 pgprot_t kmap_prot;
 
 #define kmap_get_fixmap_pte(vaddr) \
-	pte_offset(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
+	pte_offset_kernel(pmd_offset(pml4_pgd_offset(pml4_offset_k(vaddr), \
+						     vaddr), (vaddr)), (vaddr))
 
 void __init kmap_init(void)
 {
@@ -285,15 +286,16 @@ void show_mem(void)
  * Allocate and free page tables.
  */
 
-pgd_t *pgd_alloc(struct mm_struct *mm)
+pgd_t *__pgd_alloc(struct mm_struct *mm, pml4_t *pml4, unsigned long address)
 {
 	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
 
 	if (pgd) {
-		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-		memcpy(pgd + USER_PTRS_PER_PGD,
-		       swapper_pg_dir + USER_PTRS_PER_PGD,
-		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+		memset(pgd, 0, USER_PGDS_IN_LAST_PML4 * sizeof(pgd_t));
+		memcpy(pgd + USER_PGDS_IN_LAST_PML4,
+		       swapper_pg_dir + USER_PGDS_IN_LAST_PML4,
+		       (PTRS_PER_PGD - USER_PGDS_IN_LAST_PML4) *
+		       sizeof(pgd_t));
 	}
 	return pgd;
 }
diff -puN arch/um/kernel/process_kern.c~uml-pml4-support arch/um/kernel/process_kern.c
--- 25/arch/um/kernel/process_kern.c~uml-pml4-support	2004-11-15 20:00:54.910450688 -0800
+++ 25-akpm/arch/um/kernel/process_kern.c	2004-11-15 20:00:54.924448560 -0800
@@ -240,7 +240,7 @@ void *um_virt_to_phys(struct task_struct
 	if(task->mm == NULL)
 		return(ERR_PTR(-EINVAL));
 
-	pgd = pgd_offset(task->mm, addr);
+	pgd = pml4_pgd_offset(pml4_offset(task->mm, addr), addr);
 	pmd = pmd_offset(pgd, addr);
 	if(!pmd_present(*pmd))
 		return(ERR_PTR(-EINVAL));
diff -puN arch/um/kernel/skas/tlb.c~uml-pml4-support arch/um/kernel/skas/tlb.c
--- 25/arch/um/kernel/skas/tlb.c~uml-pml4-support	2004-11-15 20:00:54.911450536 -0800
+++ 25-akpm/arch/um/kernel/skas/tlb.c	2004-11-15 20:00:54.924448560 -0800
@@ -26,7 +26,7 @@ static void fix_range(struct mm_struct *
 	if(mm == NULL) return;
 	fd = mm->context.skas.mm_fd;
 	for(addr = start_addr; addr < end_addr;){
-		npgd = pgd_offset(mm, addr);
+		npgd = pml4_pgd_offset(pml4_offset(mm, addr), addr);
 		npmd = pmd_offset(npgd, addr);
 		if(pmd_present(*npmd)){
 			npte = pte_offset_kernel(npmd, addr);
@@ -78,7 +78,7 @@ void flush_tlb_kernel_range_skas(unsigne
 
 	mm = &init_mm;
 	for(addr = start; addr < end;){
-		pgd = pgd_offset(mm, addr);
+		pgd = pml4_pgd_offset(pml4_offset(mm, addr), addr);
 		pmd = pmd_offset(pgd, addr);
 		if(pmd_present(*pmd)){
 			pte = pte_offset_kernel(pmd, addr);
diff -puN arch/um/kernel/tlb.c~uml-pml4-support arch/um/kernel/tlb.c
--- 25/arch/um/kernel/tlb.c~uml-pml4-support	2004-11-15 20:00:54.913450232 -0800
+++ 25-akpm/arch/um/kernel/tlb.c	2004-11-15 20:00:54.925448408 -0800
@@ -56,7 +56,7 @@ void force_flush_all(void)
 
 pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
 {
-	return(pgd_offset(mm, address));
+	return(pml4_pgd_offset(pml4_offset(mm, address), address));
 }
 
 pmd_t *pmd_offset_proc(pgd_t *pgd, unsigned long address)
@@ -71,8 +71,8 @@ pte_t *pte_offset_proc(pmd_t *pmd, unsig
 
 pte_t *addr_pte(struct task_struct *task, unsigned long addr)
 {
-	return(pte_offset_kernel(pmd_offset(pgd_offset(task->mm, addr), addr),
-				 addr));
+	return(pte_offset_kernel(pmd_offset(pgd_offset_proc(task->mm, addr),
+					    addr), addr));
 }
 
 /*
diff -puN arch/um/kernel/trap_kern.c~uml-pml4-support arch/um/kernel/trap_kern.c
--- 25/arch/um/kernel/trap_kern.c~uml-pml4-support	2004-11-15 20:00:54.914450080 -0800
+++ 25-akpm/arch/um/kernel/trap_kern.c	2004-11-15 20:00:54.925448408 -0800
@@ -54,7 +54,7 @@ int handle_page_fault(unsigned long addr
 	if(is_write && !(vma->vm_flags & VM_WRITE))
 		goto out;
 	page = address & PAGE_MASK;
-	pgd = pgd_offset(mm, page);
+	pgd = pml4_pgd_offset(pml4_offset(mm, page), page);
 	pmd = pmd_offset(pgd, page);
 	do {
 survive:
diff -puN arch/um/kernel/tt/tlb.c~uml-pml4-support arch/um/kernel/tt/tlb.c
--- 25/arch/um/kernel/tt/tlb.c~uml-pml4-support	2004-11-15 20:00:54.916449776 -0800
+++ 25-akpm/arch/um/kernel/tt/tlb.c	2004-11-15 20:00:54.926448256 -0800
@@ -41,7 +41,7 @@ static void fix_range(struct mm_struct *
 			addr = STACK_TOP - ABOVE_KMEM;
 			continue;
 		}
-		npgd = pgd_offset(mm, addr);
+		npgd = pml4_pgd_offset(pml4_offset(mm, addr), addr);
 		npmd = pmd_offset(npgd, addr);
 		if(pmd_present(*npmd)){
 			npte = pte_offset_kernel(npmd, addr);
@@ -97,7 +97,7 @@ static void flush_kernel_vm_range(unsign
 
 	mm = &init_mm;
 	for(addr = start; addr < end;){
-		pgd = pgd_offset(mm, addr);
+		pgd = pml4_pgd_offset(pml4_offset(mm, addr), addr);
 		pmd = pmd_offset(pgd, addr);
 		if(pmd_present(*pmd)){
 			pte = pte_offset_kernel(pmd, addr);
@@ -161,7 +161,7 @@ void mprotect_kernel_vm(int w)
 
 	mm = &init_mm;
 	for(addr = start_vm; addr < end_vm;){
-		pgd = pgd_offset(mm, addr);
+		pgd = pml4_pgd_offset(pml4_offset(mm, addr), addr);
 		pmd = pmd_offset(pgd, addr);
 		if(pmd_present(*pmd)){
 			pte = pte_offset_kernel(pmd, addr);
diff -puN include/asm-um/page.h~uml-pml4-support include/asm-um/page.h
--- 25/include/asm-um/page.h~uml-pml4-support	2004-11-15 20:00:54.917449624 -0800
+++ 25-akpm/include/asm-um/page.h	2004-11-15 20:00:54.926448256 -0800
@@ -49,4 +49,6 @@ extern struct page *arch_validate(struct
 extern void arch_free_page(struct page *page, int order);
 #define HAVE_ARCH_FREE_PAGE
 
+#include
+
 #endif
diff -puN include/asm-um/pgalloc.h~uml-pml4-support include/asm-um/pgalloc.h
--- 25/include/asm-um/pgalloc.h~uml-pml4-support	2004-11-15 20:00:54.919449320 -0800
+++ 25-akpm/include/asm-um/pgalloc.h	2004-11-15 20:00:54.926448256 -0800
@@ -19,9 +19,6 @@ static inline void pmd_populate(struct m
 	set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte)));
 }
 
-extern pgd_t *pgd_alloc(struct mm_struct *);
-extern void pgd_free(pgd_t *pgd);
-
 extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
 extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
 
@@ -49,6 +46,10 @@ static inline void pte_free(struct page
 
 #define check_pgt_cache()	do { } while (0)
 
+extern void pgd_free(pgd_t *pgd);
+
+#include
+
 #endif
 
 /*
diff -puN include/asm-um/pgtable.h~uml-pml4-support include/asm-um/pgtable.h
--- 25/include/asm-um/pgtable.h~uml-pml4-support	2004-11-15 20:00:54.920449168 -0800
+++ 25-akpm/include/asm-um/pgtable.h	2004-11-15 20:00:54.927448104 -0800
@@ -37,7 +37,7 @@ extern unsigned long *empty_zero_page;
 #define PTRS_PER_PTE 1024
 #define PTRS_PER_PMD 1
 #define PTRS_PER_PGD 1024
-#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+#define USER_PGDS_IN_LAST_PML4 (TASK_SIZE/PGDIR_SIZE)
 #define FIRST_USER_PGD_NR	0
 
 #define pte_ERROR(e) \
@@ -372,19 +372,7 @@ static inline pte_t pte_modify(pte_t pte
  */
 
 #define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-/*
- * pgd_offset() returns a (pgd_t *)
- * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
- */
-#define pgd_offset(mm, address) \
-((mm)->pgd + ((address) >> PGDIR_SHIFT))
-
-
-/*
- * a shortcut which implies the use of the kernel's pgd, instead
- * of a process's
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#define pgd_index_k(addr) pgd_index(addr)
 
 #define pmd_index(address) \
 	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
@@ -426,6 +414,8 @@ static inline pmd_t * pmd_offset(pgd_t *
 
 #include
 
+#include
+
 #endif
 
 #endif
_
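
For reference, the pml4_offset()/pml4_pgd_offset() stubs used throughout the
conversion come from the generic pml4 headers, not from this diff, so the
standalone userspace sketch below is only an assumption about how they behave
on an architecture like UML whose pml4 level is folded down to a single
entry.  The PGDIR_SHIFT value of 22 is inferred from the 1024-entry pte/pgd
tables declared in pgtable.h above; everything else mirrors the naming in
the patch.

/*
 * Standalone sketch, not kernel code: models a folded pml4 level with
 * exactly one entry, so every lookup lands in slot 0 and the real
 * indexing happens at the pgd level, as in two-level UML.
 */
#include <stdio.h>

#define PGDIR_SHIFT	22	/* assumed: 12-bit page offset + 10-bit pte index */
#define PTRS_PER_PGD	1024	/* from include/asm-um/pgtable.h above */

typedef struct { unsigned long pgd; } pgd_t;
typedef struct { pgd_t *pgd; } pml4_t;	/* the single folded top-level entry */

struct mm_struct { pml4_t pml4[1]; };

/* Assumed stub behavior: with one pml4 entry, every address maps to
 * slot 0, independent of the address. */
static pml4_t *pml4_offset(struct mm_struct *mm, unsigned long address)
{
	(void) address;
	return &mm->pml4[0];
}

/* Descend from the (trivial) pml4 entry to the pgd entry for "address". */
static pgd_t *pml4_pgd_offset(pml4_t *pml4, unsigned long address)
{
	return pml4->pgd + ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1));
}

int main(void)
{
	static pgd_t page_dir[PTRS_PER_PGD];
	struct mm_struct mm = { .pml4 = { { .pgd = page_dir } } };
	unsigned long addr = 0x08048000UL;

	/* The compound call this patch substitutes for pgd_offset(mm, addr): */
	pgd_t *pgd = pml4_pgd_offset(pml4_offset(&mm, addr), addr);

	printf("pgd index for %#lx: %ld\n", addr, (long) (pgd - page_dir));
	return 0;
}

With a single-entry pml4 the compound call collapses back to the old
pgd_offset() arithmetic, which is why every conversion in this patch can be
purely mechanical.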