diff -urpN -X /home/fletch/.diff.exclude 520-queuestat/arch/i386/mm/init.c 540-separate_pmd/arch/i386/mm/init.c
--- 520-queuestat/arch/i386/mm/init.c	Sun Apr 20 22:07:17 2003
+++ 540-separate_pmd/arch/i386/mm/init.c	Sun Apr 20 22:22:15 2003
@@ -514,9 +514,11 @@ void __init mem_init(void)
 #include <linux/slab.h>
 
 kmem_cache_t *pmd_cache;
+kmem_cache_t *kernel_pmd_cache;
 kmem_cache_t *pgd_cache;
 
 void pmd_ctor(void *, kmem_cache_t *, unsigned long);
+void kernel_pmd_ctor(void *, kmem_cache_t *, unsigned long);
 void pgd_ctor(void *, kmem_cache_t *, unsigned long);
 
 void __init pgtable_cache_init(void)
@@ -528,9 +530,18 @@ void __init pgtable_cache_init(void)
 				SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,
 				pmd_ctor,
 				NULL);
-
 	if (!pmd_cache)
 		panic("pgtable_cache_init(): cannot create pmd cache");
+
+	kernel_pmd_cache = kmem_cache_create("pae_kernel_pmd",
+				(PTRS_PER_PMD*sizeof(pmd_t))*KERNEL_PGD_PTRS,
+				0,
+				SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,
+				kernel_pmd_ctor,
+				NULL);
+
+	if (!kernel_pmd_cache)
+		panic("pgtable_cache_init(): cannot create kernel pmd cache");
 }
 
 /*
diff -urpN -X /home/fletch/.diff.exclude 520-queuestat/arch/i386/mm/pgtable.c 540-separate_pmd/arch/i386/mm/pgtable.c
--- 520-queuestat/arch/i386/mm/pgtable.c	Mon Mar 17 21:43:39 2003
+++ 540-separate_pmd/arch/i386/mm/pgtable.c	Sun Apr 20 22:22:15 2003
@@ -136,7 +136,7 @@ pte_t *pte_alloc_one_kernel(struct mm_st
 
 	do {
 		pte = (pte_t *) __get_free_page(GFP_KERNEL);
-		if (pte)
+		if (pte)
 			clear_page(pte);
 		else {
 			current->state = TASK_UNINTERRUPTIBLE;
@@ -168,6 +168,7 @@ struct page *pte_alloc_one(struct mm_str
 }
 
 extern kmem_cache_t *pmd_cache;
+extern kmem_cache_t *kernel_pmd_cache;
 extern kmem_cache_t *pgd_cache;
 
 void pmd_ctor(void *__pmd, kmem_cache_t *pmd_cache, unsigned long flags)
@@ -175,6 +176,15 @@ void pmd_ctor(void *__pmd, kmem_cache_t
 	clear_page(__pmd);
 }
 
+void kernel_pmd_ctor(void *__pmd, kmem_cache_t *kernel_pmd_cache, unsigned long flags)
+{
+	int i;
+	for (i=USER_PTRS_PER_PGD; i<PTRS_PER_PGD; i++)
+		memcpy((pmd_t *)__pmd + (i-USER_PTRS_PER_PGD)*PTRS_PER_PMD,
+			(void *)pgd_page(swapper_pg_dir[i]),
+			PTRS_PER_PMD*sizeof(pmd_t));
+}
+
 void pgd_ctor(void *__pgd, kmem_cache_t *pgd_cache, unsigned long flags)
 {
 	pgd_t *pgd = __pgd;
@@ -193,11 +203,17 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	int i;
 	pgd_t *pgd = kmem_cache_alloc(pgd_cache, SLAB_KERNEL);
+	kmem_cache_t *pmd_cachep = pmd_cache;
 
 	if (PTRS_PER_PMD == 1 || !pgd)
 		return pgd;
 
-	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
-		pmd_t *pmd = kmem_cache_alloc(pmd_cache, SLAB_KERNEL);
+	for (i = 0; i < PTRS_PER_PGD; ++i) {
+		pmd_t *pmd;
+
+		if (i >= USER_PTRS_PER_PGD)
+			pmd_cachep = kernel_pmd_cache;
+
+		pmd = kmem_cache_alloc(pmd_cachep, SLAB_KERNEL);
 		if (!pmd)
 			goto out_oom;
 		set_pgd(pgd + i, __pgd(1 + __pa((unsigned long long)((unsigned long)pmd))));
@@ -205,8 +221,10 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	return pgd;
 
 out_oom:
-	for (i--; i >= 0; --i)
-		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+	for (i--; i >= 0; --i) {
+		pmd_t *pmd = pmd_offset(&pgd[i],0);
+		kmem_cache_free(pmd_cachep, pmd);
+	}
 	kmem_cache_free(pgd_cache, (void *)pgd);
 	return NULL;
 }
@@ -216,8 +234,14 @@ void pgd_free(pgd_t *pgd)
 	int i;
 
 	if (PTRS_PER_PMD > 1) {
-		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
-			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+		for (i = 0; i < PTRS_PER_PGD; i++) {
+			pmd_t *pmd_to_free = pmd_offset(&pgd[i],0);
+
+			if( i >= USER_PTRS_PER_PGD )
+				kmem_cache_free(kernel_pmd_cache, pmd_to_free);
+			else
+				kmem_cache_free(pmd_cache, pmd_to_free);
+
 			set_pgd(pgd + i, __pgd(0));
 		}
 	}