Patch from Kevin Pedretti

The previous fix for unmapping hugetlb regions could still produce incorrect
alignments if the munmap request covers multiple VMAs.  Fix it by always
unmapping the entire hugepage VMA inside the inner loop.


 mm/memory.c |   23 +++++++++--------------
 1 files changed, 9 insertions(+), 14 deletions(-)

diff -puN mm/memory.c~hugetlb-unmap_vmas-fix mm/memory.c
--- 25/mm/memory.c~hugetlb-unmap_vmas-fix	2003-03-11 22:55:28.000000000 -0800
+++ 25-akpm/mm/memory.c	2003-03-12 02:51:40.000000000 -0800
@@ -498,16 +498,6 @@ void unmap_page_range(struct mmu_gather 
 #define ZAP_BLOCK_SIZE	(~(0UL))
 #endif
 
-/*
- * hugepage regions must be unmapped with HPAGE_SIZE granularity
- */
-static inline unsigned long zap_block_size(struct vm_area_struct *vma)
-{
-	if (is_vm_hugetlb_page(vma))
-		return HPAGE_SIZE;
-	return ZAP_BLOCK_SIZE;
-}
-
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
  * @tlbp: address of the caller's struct mmu_gather
@@ -538,7 +528,7 @@ int unmap_vmas(struct mmu_gather **tlbp,
 		struct vm_area_struct *vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted)
 {
-	unsigned long zap_bytes = zap_block_size(vma);
+	unsigned long zap_bytes = ZAP_BLOCK_SIZE;
 	unsigned long tlb_start;	/* For tlb_finish_mmu */
 	int tlb_start_valid = 0;
 	int ret = 0;
@@ -568,7 +558,12 @@ int unmap_vmas(struct mmu_gather **tlbp,
 		ret++;
 
 		while (start != end) {
-			unsigned long block = min(zap_bytes, end - start);
+			unsigned long block;
+
+			if (is_vm_hugetlb_page(vma))
+				block = end - start;
+			else
+				block = min(zap_bytes, end - start);
 
 			if (!tlb_start_valid) {
 				tlb_start = start;
@@ -578,7 +573,7 @@ int unmap_vmas(struct mmu_gather **tlbp,
 			unmap_page_range(*tlbp, vma, start, start + block);
 			start += block;
 			zap_bytes -= block;
-			if (zap_bytes != 0)
+			if ((long)zap_bytes > 0)
 				continue;
 			if (need_resched()) {
 				tlb_finish_mmu(*tlbp, tlb_start, start);
@@ -586,7 +581,7 @@ int unmap_vmas(struct mmu_gather **tlbp,
 			*tlbp = tlb_gather_mmu(mm, 0);
 			tlb_start_valid = 0;
 		}
-		zap_bytes = zap_block_size(vma);
+		zap_bytes = ZAP_BLOCK_SIZE;
 	}
 	if (vma->vm_next && vma->vm_next->vm_start < vma->vm_end)
 		printk("%s: VMA list is not sorted correctly!\n", _
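
For illustration only (not part of the patch): below is a minimal standalone
userspace sketch of the block-size selection the patched inner loop ends up
with.  A hugetlb VMA is handed to the zap routine as one whole-VMA block, so
the range stays hugepage-aligned no matter how much of zap_bytes an earlier,
non-huge VMA consumed, while ordinary VMAs are still batched in
ZAP_BLOCK_SIZE chunks.  The names vma_sim, zap_range() and unmap_vmas_sim()
are made-up stand-ins rather than kernel symbols, and ZAP_BLOCK_SIZE is set
deliberately small here so the batching is visible (the non-preempt kernel
value is ~0UL, as shown in the hunk above).

/*
 * Illustrative userspace model of the patched unmap_vmas() inner loop.
 * vma_sim, zap_range() and unmap_vmas_sim() are hypothetical stand-ins;
 * only the block-size selection mirrors the kernel change above.
 */
#include <stdio.h>

#define ZAP_BLOCK_SIZE	(256UL * 4096UL)	/* small on purpose for the demo */

struct vma_sim {
	unsigned long vm_start;
	unsigned long vm_end;
	int huge;				/* stands in for is_vm_hugetlb_page() */
};

/* stand-in for unmap_page_range(): just report what would be zapped */
static void zap_range(unsigned long start, unsigned long end)
{
	printf("  zap [%#lx, %#lx) (%lu bytes)\n", start, end, end - start);
}

static void unmap_vmas_sim(struct vma_sim *vmas, int nr,
			   unsigned long start_addr, unsigned long end_addr)
{
	unsigned long zap_bytes = ZAP_BLOCK_SIZE;
	int i;

	for (i = 0; i < nr; i++) {
		struct vma_sim *vma = &vmas[i];
		unsigned long start, end;

		start = start_addr > vma->vm_start ? start_addr : vma->vm_start;
		end = end_addr < vma->vm_end ? end_addr : vma->vm_end;
		if (start >= end)
			continue;

		printf("vma %d (%s):\n", i, vma->huge ? "hugetlb" : "normal");
		while (start != end) {
			unsigned long block;

			if (vma->huge)
				block = end - start;	/* whole hugetlb VMA at once */
			else
				block = zap_bytes < end - start ?
						zap_bytes : end - start;

			zap_range(start, start + block);
			start += block;
			zap_bytes -= block;
			if ((long)zap_bytes > 0)
				continue;
			/* in the kernel, the TLB flush / resched point is here */
			zap_bytes = ZAP_BLOCK_SIZE;
		}
	}
}

int main(void)
{
	/* one 3MB normal VMA and one 4MB hugetlb VMA, unmapped in one call */
	struct vma_sim vmas[] = {
		{ 0x10000000UL, 0x10300000UL, 0 },
		{ 0x40000000UL, 0x40400000UL, 1 },
	};

	unmap_vmas_sim(vmas, 2, 0x10000000UL, 0x40400000UL);
	return 0;
}

The (long) cast in the batching test matters for the same reason: a single
hugetlb block can be larger than the remaining zap_bytes, so the counter is
allowed to go "negative" and simply triggers a reset instead of being
misread as a huge unsigned remainder.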