From: Andrea Arcangeli

The p->size returned by remove_vm_area is off by one, since it includes
the guard page, and this led every iounmap to execute change_page_attr
on one more page than ioremap_nocache did.  This is really a bug in
mm/vmalloc.c: we surely don't want to expose the size of the guard page
to the caller.

Signed-off-by: Andrew Morton
---

 25-akpm/arch/i386/mm/ioremap.c   |    4 ++--
 25-akpm/arch/x86_64/mm/ioremap.c |   14 +++++++-------
 2 files changed, 9 insertions(+), 9 deletions(-)

diff -puN arch/i386/mm/ioremap.c~iounmap-fix arch/i386/mm/ioremap.c
--- 25/arch/i386/mm/ioremap.c~iounmap-fix	2004-12-03 20:02:08.332048392 -0800
+++ 25-akpm/arch/i386/mm/ioremap.c	2004-12-03 20:05:28.618600208 -0800
@@ -152,7 +152,7 @@ void __iomem * __ioremap(unsigned long p
 	/*
 	 * Ok, go for it..
 	 */
-	area = get_vm_area(size, VM_IOREMAP);
+	area = get_vm_area(size, VM_IOREMAP | (flags << 24));
 	if (!area)
 		return NULL;
 	area->phys_addr = phys_addr;
@@ -232,7 +232,7 @@ void iounmap(volatile void __iomem *addr
 		return;
 	}

-	if (p->flags && p->phys_addr < virt_to_phys(high_memory)) {
+	if ((p->flags >> 24) && p->phys_addr < virt_to_phys(high_memory)) {
 		change_page_attr(virt_to_page(__va(p->phys_addr)),
 				 p->size >> PAGE_SHIFT,
 				 PAGE_KERNEL);
diff -puN arch/x86_64/mm/ioremap.c~iounmap-fix arch/x86_64/mm/ioremap.c
--- 25/arch/x86_64/mm/ioremap.c~iounmap-fix	2004-12-03 20:02:08.334048088 -0800
+++ 25-akpm/arch/x86_64/mm/ioremap.c	2004-12-03 20:05:28.619600056 -0800
@@ -128,11 +128,11 @@ void __iomem * __ioremap(unsigned long p
 	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
 		return (__force void __iomem *)phys_to_virt(phys_addr);

+#ifndef CONFIG_DISCONTIGMEM
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
 	if (phys_addr < virt_to_phys(high_memory)) {
-#ifndef CONFIG_DISCONTIGMEM
 		char *t_addr, *t_end;
 		struct page *page;

@@ -142,8 +142,8 @@ void __iomem * __ioremap(unsigned long p
 		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
 			if(!PageReserved(page))
 				return NULL;
-#endif
 	}
+#endif

 	/*
 	 * Mappings have to be page-aligned
@@ -155,7 +155,7 @@ void __iomem * __ioremap(unsigned long p
 	/*
 	 * Ok, go for it..
 	 */
-	area = get_vm_area(size, VM_IOREMAP);
+	area = get_vm_area(size, VM_IOREMAP | (flags << 24));
 	if (!area)
 		return NULL;
 	area->phys_addr = phys_addr;
@@ -195,12 +195,12 @@ void __iomem *ioremap_nocache (unsigned
 	if (!p)
 		return p;

-	if (phys_addr + size < virt_to_phys(high_memory)) {
+	if (phys_addr + size - 1 < virt_to_phys(high_memory)) {
 		struct page *ppage = virt_to_page(__va(phys_addr));
 		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

-		BUG_ON(phys_addr+size > (unsigned long)high_memory);
-		BUG_ON(phys_addr + size < phys_addr);
+		BUG_ON(phys_addr+size >= (unsigned long)high_memory);
+		BUG_ON(phys_addr + size <= phys_addr);

 		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
 			iounmap(p);
@@ -223,7 +223,7 @@ void iounmap(volatile void __iomem *addr
 		return;
 	}

-	if (p->flags && p->phys_addr < virt_to_phys(high_memory)) {
+	if ((p->flags >> 24) && p->phys_addr + p->size < virt_to_phys(high_memory)) {
 		change_page_attr(virt_to_page(__va(p->phys_addr)),
 				 p->size >> PAGE_SHIFT,
 				 PAGE_KERNEL);
_
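
For readers without the 2.6-era vmalloc internals at hand, below is a
minimal user-space sketch of the off-by-one the changelog describes.
vm_area_size() is a hypothetical stand-in for the guard-page padding
that __get_vm_area() performs in mm/vmalloc.c; it illustrates the
arithmetic only and is not kernel code.

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	/*
	 * Hypothetical stand-in for the sizing done by __get_vm_area():
	 * every area is padded with a one-page guard hole, and the padded
	 * size is what remove_vm_area() later hands back in vm_struct->size.
	 */
	static unsigned long vm_area_size(unsigned long requested)
	{
		return requested + PAGE_SIZE;	/* guard page included */
	}

	int main(void)
	{
		unsigned long mapped = 4 * PAGE_SIZE;	/* ioremap_nocache of 4 pages */
		unsigned long stored = vm_area_size(mapped);

		/* ioremap_nocache changed attributes on exactly these pages: */
		printf("pages made uncacheable:    %lu\n", mapped >> PAGE_SHIFT);

		/*
		 * Before the fix, iounmap() used p->size >> PAGE_SHIFT, so
		 * change_page_attr() walked one page past the mapping:
		 */
		printf("pages restored by iounmap: %lu\n", stored >> PAGE_SHIFT);
		return 0;
	}

Running this prints 4 pages made uncacheable but 5 restored: the extra
page is the guard page, which is why the changelog argues that the guard
page's size should never be exposed to the caller.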