Patch from Kevin Pedretti

The unmap_vmas() logic is designed to chew away at all the pages without
holding off preemption for too long.  But with CONFIG_SMP=y and
CONFIG_PREEMPT=y the number of pages which we batch up between rescheduling
opportunities is not a multiple of HPAGE_SIZE.  So unmap_vmas() ends up
calling unmap_hugepage_range() with a poorly aligned&sized region, and it
goes BUG.

Fix that up by ensuring that we always work across hugepage regions in
HPAGE_SIZE chunks.


 25-akpm/include/linux/hugetlb.h |    1 +
 25-akpm/mm/memory.c             |   14 ++++++++++++--
 2 files changed, 13 insertions(+), 2 deletions(-)

diff -puN mm/memory.c~hugetlb-unmap_vmas-fix mm/memory.c
--- 25/mm/memory.c~hugetlb-unmap_vmas-fix	Fri Mar  7 13:44:39 2003
+++ 25-akpm/mm/memory.c	Fri Mar  7 13:44:39 2003
@@ -486,6 +486,16 @@ void unmap_page_range(struct mmu_gather
 #define ZAP_BLOCK_SIZE	(~(0UL))
 #endif
 
+/*
+ * hugepage regions must be unmapped with HPAGE_SIZE granularity
+ */
+static inline unsigned long zap_block_size(struct vm_area_struct *vma)
+{
+	if (is_vm_hugetlb_page(vma))
+		return HPAGE_SIZE;
+	return ZAP_BLOCK_SIZE;
+}
+
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
  * @tlbp: address of the caller's struct mmu_gather
@@ -516,7 +526,7 @@ int unmap_vmas(struct mmu_gather **tlbp,
 		struct vm_area_struct *vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted)
 {
-	unsigned long zap_bytes = ZAP_BLOCK_SIZE;
+	unsigned long zap_bytes = zap_block_size(vma);
 	unsigned long tlb_start;	/* For tlb_finish_mmu */
 	int tlb_start_valid = 0;
 	int ret = 0;
@@ -564,7 +574,7 @@ int unmap_vmas(struct mmu_gather **tlbp,
 				*tlbp = tlb_gather_mmu(mm, 0);
 				tlb_start_valid = 0;
 			}
-			zap_bytes = ZAP_BLOCK_SIZE;
+			zap_bytes = zap_block_size(vma);
 		}
 		if (vma->vm_next && vma->vm_next->vm_start < vma->vm_end)
 			printk("%s: VMA list is not sorted correctly!\n",
diff -puN include/linux/hugetlb.h~hugetlb-unmap_vmas-fix include/linux/hugetlb.h
--- 25/include/linux/hugetlb.h~hugetlb-unmap_vmas-fix	Fri Mar  7 14:23:58 2003
+++ 25-akpm/include/linux/hugetlb.h	Fri Mar  7 14:24:12 2003
@@ -66,6 +66,7 @@ static inline int is_vm_hugetlb_page(str
 
 #ifndef HPAGE_MASK
 #define HPAGE_MASK	0	/* Keep the compiler happy */
+#define HPAGE_SIZE	0
 #endif
 
 #endif /* !CONFIG_HUGETLB_PAGE */
_
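
For reference, a minimal standalone userspace sketch of the arithmetic the
changelog describes: chopping a hugepage-backed range into zap-block-sized
pieces hands out regions that are not whole hugepages, while chopping it in
HPAGE_SIZE pieces does not.  The constants and the pieces_ok() helper are
illustrative assumptions only (4k base pages, 4MB hugepages, an 8-page zap
block), not the kernel's actual config- and arch-dependent values.

	#include <stdio.h>

	#define PAGE_SIZE	(4UL << 10)	/* assumed 4k base page */
	#define HPAGE_SIZE	(4UL << 20)	/* assumed 4MB hugepage */
	#define ZAP_BLOCK	(8 * PAGE_SIZE)	/* hypothetical SMP+PREEMPT batch size */

	/* Return 1 if every piece handed out is hugepage-aligned and -sized. */
	static int pieces_ok(unsigned long start, unsigned long end,
			     unsigned long block)
	{
		while (start < end) {
			unsigned long len = (end - start < block) ?
						end - start : block;

			if (start % HPAGE_SIZE || len % HPAGE_SIZE)
				return 0;	/* this piece would trip the BUG */
			start += len;
		}
		return 1;
	}

	int main(void)
	{
		unsigned long start = 0x40000000UL;	/* hugepage-aligned mapping */
		unsigned long end = start + 2 * HPAGE_SIZE;

		printf("ZAP_BLOCK pieces ok:  %d\n", pieces_ok(start, end, ZAP_BLOCK));
		printf("HPAGE_SIZE pieces ok: %d\n", pieces_ok(start, end, HPAGE_SIZE));
		return 0;
	}

It prints 0 for the zap-block walk and 1 for the HPAGE_SIZE walk, which is
the case the zap_block_size() helper in the patch selects for hugetlb vmas.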