This allocates pgds and pmds using the slab allocator and slab ctors.
It has a benefit beyond preconstruction in that PAE pmds are accounted
via /proc/slabinfo. A standalone sketch of the ctor pattern is appended
after the diff.

diff -urpN wli-2.5.51-bk1-11/arch/i386/mm/init.c wli-2.5.51-bk1-12/arch/i386/mm/init.c
--- wli-2.5.51-bk1-11/arch/i386/mm/init.c	2002-12-11 18:34:30.000000000 -0800
+++ wli-2.5.51-bk1-12/arch/i386/mm/init.c	2002-12-11 18:43:01.000000000 -0800
@@ -493,15 +493,27 @@ void __init mem_init(void)
 }
 
 #if CONFIG_X86_PAE
-struct kmem_cache_s *pae_pgd_cachep;
+#include
+
+kmem_cache_t *pae_pmd_cachep;
+kmem_cache_t *pae_pgd_cachep;
+
+void pae_pmd_ctor(void *, kmem_cache_t *, unsigned long);
+void pae_pgd_ctor(void *, kmem_cache_t *, unsigned long);
 
 void __init pgtable_cache_init(void)
 {
 	/*
 	 * PAE pgds must be 16-byte aligned:
 	 */
+	pae_pmd_cachep = kmem_cache_create("pae_pmd", 4096, 0,
+		SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, pae_pmd_ctor, NULL);
+
+	if (!pae_pmd_cachep)
+		panic("init_pae(): cannot allocate pae_pmd SLAB cache");
+
 	pae_pgd_cachep = kmem_cache_create("pae_pgd", 32, 0,
-		SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
+		SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, pae_pgd_ctor, NULL);
 	if (!pae_pgd_cachep)
 		panic("init_pae(): Cannot alloc pae_pgd SLAB cache");
 }
diff -urpN wli-2.5.51-bk1-11/arch/i386/mm/pgtable.c wli-2.5.51-bk1-12/arch/i386/mm/pgtable.c
--- wli-2.5.51-bk1-11/arch/i386/mm/pgtable.c	2002-12-09 18:46:28.000000000 -0800
+++ wli-2.5.51-bk1-12/arch/i386/mm/pgtable.c	2002-12-11 18:44:52.000000000 -0800
@@ -168,38 +168,57 @@ struct page *pte_alloc_one(struct mm_str
 
 #if CONFIG_X86_PAE
 
+extern kmem_cache_t *pae_pmd_cachep;
+
+void pae_pmd_ctor(void *__pmd, kmem_cache_t *pmd_cache, unsigned long flags)
+{
+	clear_page(__pmd);
+}
+
+void pae_pgd_ctor(void *__pgd, kmem_cache_t *pgd_cache, unsigned long flags)
+{
+	pgd_t *pgd = __pgd;
+
+	memcpy(pgd + USER_PTRS_PER_PGD,
+		swapper_pg_dir + USER_PTRS_PER_PGD,
+		(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	int i;
-	pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, GFP_KERNEL);
+	pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, SLAB_KERNEL);
 
-	if (pgd) {
-		for (i = 0; i < USER_PTRS_PER_PGD; i++) {
-			unsigned long pmd = __get_free_page(GFP_KERNEL);
-			if (!pmd)
-				goto out_oom;
-			clear_page(pmd);
-			set_pgd(pgd + i, __pgd(1 + __pa(pmd)));
+	if (!pgd)
+		return NULL;
+
+	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
+		pmd_t *pmd = kmem_cache_alloc(pae_pmd_cachep, SLAB_KERNEL);
+		if (!pmd)
+			goto out_oom;
+		else if ((unsigned long)pmd & ~PAGE_MASK) {
+			printk("kmem_cache_alloc did wrong! death ensues!\n");
+			goto out_oom;
 		}
-		memcpy(pgd + USER_PTRS_PER_PGD,
-			swapper_pg_dir + USER_PTRS_PER_PGD,
-			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+		set_pgd(pgd + i, __pgd(1 + __pa((unsigned long long)((unsigned long)pmd))));
 	}
 	return pgd;
+
 out_oom:
-	for (i--; i >= 0; i--)
-		free_page((unsigned long)__va(pgd_val(pgd[i])-1));
-	kmem_cache_free(pae_pgd_cachep, pgd);
+	for (i--; i >= 0; --i)
+		kmem_cache_free(pae_pmd_cachep, (void *)__va(pgd_val(pgd[i])-1));
+	kmem_cache_free(pae_pgd_cachep, (void *)pgd);
 	return NULL;
 }
 
 void pgd_free(pgd_t *pgd)
 {
 	int i;
-
-	for (i = 0; i < USER_PTRS_PER_PGD; i++)
-		free_page((unsigned long)__va(pgd_val(pgd[i])-1));
-	kmem_cache_free(pae_pgd_cachep, pgd);
+	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
+		kmem_cache_free(pae_pmd_cachep, (void *)__va(pgd_val(pgd[i])-1));
+		set_pgd(pgd + i, __pgd(0));
+	}
+	kmem_cache_free(pae_pgd_cachep, (void *)pgd);
 }
 
 #else
diff -urpN wli-2.5.51-bk1-11/include/asm-i386/pgalloc.h wli-2.5.51-bk1-12/include/asm-i386/pgalloc.h
--- wli-2.5.51-bk1-11/include/asm-i386/pgalloc.h	2002-12-09 18:45:43.000000000 -0800
+++ wli-2.5.51-bk1-12/include/asm-i386/pgalloc.h	2002-12-11 18:42:06.000000000 -0800
@@ -20,11 +20,11 @@ static inline void pmd_populate(struct m
  * Allocate and free page tables.
  */
 
-extern pgd_t *pgd_alloc(struct mm_struct *);
-extern void pgd_free(pgd_t *pgd);
+pgd_t *pgd_alloc(struct mm_struct *);
+void pgd_free(pgd_t *pgd);
 
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
+pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
+struct page *pte_alloc_one(struct mm_struct *, unsigned long);
 
 static inline void pte_free_kernel(pte_t *pte)
 {
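
For reference, here is the slab-ctor pattern in isolation, as a minimal
sketch (the cache name "example", the 64-byte object size, and the ctor
body below are hypothetical, not from the patch). kmem_cache_create()
registers the constructor, which the slab runs once per object when it
populates a fresh page of objects; objects handed back by
kmem_cache_free() return to the cache already constructed, so a later
kmem_cache_alloc() skips reinitialization, and the cache shows up in
/proc/slabinfo.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

/* hypothetical cache, for illustration only */
static kmem_cache_t *example_cachep;

/*
 * Constructor: invoked by the slab when a new page of objects is
 * set up, not on every kmem_cache_alloc() -- which is where the
 * preconstruction win comes from.
 */
static void example_ctor(void *obj, kmem_cache_t *cachep, unsigned long flags)
{
	memset(obj, 0, 64);
}

void __init example_cache_init(void)
{
	example_cachep = kmem_cache_create("example", 64, 0,
				SLAB_HWCACHE_ALIGN, example_ctor, NULL);
	if (!example_cachep)
		panic("example_cache_init(): cannot allocate example cache");
}

This follows the same shape as the pae_pmd/pae_pgd caches above: a ctor
plus a NULL dtor, with panic() on boot-time allocation failure.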