From: Andrea Arcangeli

Reject zero-sized vm-area requests, align the size properly, and hide the
guard page from callers like ioremap - this avoids a kernel crash due to
one more page being passed to change_page_attr().

Signed-off-by: Andrea Arcangeli
Signed-off-by: Andrew Morton
---

 25-akpm/arch/i386/mm/ioremap.c   |    6 +++---
 25-akpm/arch/x86_64/mm/ioremap.c |    4 ++--
 25-akpm/mm/vmalloc.c             |   15 +++++++++++----
 3 files changed, 16 insertions(+), 9 deletions(-)

diff -puN arch/i386/mm/ioremap.c~pageattr-guard-page arch/i386/mm/ioremap.c
--- 25/arch/i386/mm/ioremap.c~pageattr-guard-page	2004-12-03 20:04:25.803149608 -0800
+++ 25-akpm/arch/i386/mm/ioremap.c	2004-12-03 20:04:25.809148696 -0800
@@ -195,9 +195,9 @@ void __iomem *ioremap_nocache (unsigned
 		return p;
 
 	/* Guaranteed to be > phys_addr, as per __ioremap() */
-	last_addr = phys_addr + size - 1;
+	last_addr = phys_addr + size;
 
-	if (last_addr < virt_to_phys(high_memory)) {
+	if (last_addr <= virt_to_phys(high_memory)) {
 		struct page *ppage = virt_to_page(__va(phys_addr));
 		unsigned long npages;
 
@@ -232,7 +232,7 @@ void iounmap(volatile void __iomem *addr
 		return;
 	}
 
-	if ((p->flags >> 24) && p->phys_addr < virt_to_phys(high_memory)) {
+	if ((p->flags >> 24) && p->phys_addr + p->size <= virt_to_phys(high_memory)) {
 		change_page_attr(virt_to_page(__va(p->phys_addr)),
 				 p->size >> PAGE_SHIFT,
 				 PAGE_KERNEL);
diff -puN arch/x86_64/mm/ioremap.c~pageattr-guard-page arch/x86_64/mm/ioremap.c
--- 25/arch/x86_64/mm/ioremap.c~pageattr-guard-page	2004-12-03 20:04:25.804149456 -0800
+++ 25-akpm/arch/x86_64/mm/ioremap.c	2004-12-03 20:04:25.809148696 -0800
@@ -195,7 +195,7 @@ void __iomem *ioremap_nocache (unsigned
 	if (!p)
 		return p;
 
-	if (phys_addr + size - 1 < virt_to_phys(high_memory)) {
+	if (phys_addr + size <= virt_to_phys(high_memory)) {
 		struct page *ppage = virt_to_page(__va(phys_addr));
 		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
@@ -223,7 +223,7 @@ void iounmap(volatile void __iomem *addr
 		return;
 	}
 
-	if ((p->flags >> 24) && p->phys_addr + p->size < virt_to_phys(high_memory)) {
+	if ((p->flags >> 24) && p->phys_addr + p->size <= virt_to_phys(high_memory)) {
 		change_page_attr(virt_to_page(__va(p->phys_addr)),
 				 p->size >> PAGE_SHIFT,
 				 PAGE_KERNEL);
diff -puN mm/vmalloc.c~pageattr-guard-page mm/vmalloc.c
--- 25/mm/vmalloc.c~pageattr-guard-page	2004-12-03 20:04:25.806149152 -0800
+++ 25-akpm/mm/vmalloc.c	2004-12-03 20:04:25.810148544 -0800
@@ -199,20 +199,22 @@ struct vm_struct *__get_vm_area(unsigned
 		align = 1ul << bit;
 	}
 	addr = ALIGN(start, align);
+	size = PAGE_ALIGN(size);
 
 	area = kmalloc(sizeof(*area), GFP_KERNEL);
 	if (unlikely(!area))
 		return NULL;
 
-	/*
-	 * We always allocate a guard page.
-	 */
-	size += PAGE_SIZE;
 	if (unlikely(!size)) {
 		kfree (area);
 		return NULL;
 	}
 
+	/*
+	 * We always allocate a guard page.
+	 */
+	size += PAGE_SIZE;
+
 	write_lock(&vmlist_lock);
 	for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
 		if ((unsigned long)tmp->addr < addr) {
@@ -292,6 +294,11 @@ found:
 	unmap_vm_area(tmp);
 	*p = tmp->next;
 	write_unlock(&vmlist_lock);
+
+	/*
+	 * Remove the guard page.
+	 */
+	tmp->size -= PAGE_SIZE;
 	return tmp;
 }

_
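
As a rough illustration of the guard-page accounting the patch introduces, here is a
minimal userspace sketch: the area allocator page-aligns the request, rejects a zero
size, adds one guard page internally, and strips it again before handing the area back,
so a caller like iounmap() passes the mapped page count - not one extra - to
change_page_attr().  The names below (toy_vm_struct, toy_get_vm_area,
toy_remove_vm_area) and the local PAGE_SIZE/PAGE_ALIGN definitions are hypothetical
stand-ins for illustration only, not the kernel API.

	/*
	 * Userspace sketch of the guard-page bookkeeping; simplified
	 * stand-ins, not kernel code.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	struct toy_vm_struct {
		unsigned long size;	/* size as seen by the caller */
	};

	/* After the patch: align first, reject zero, then add the guard page. */
	static struct toy_vm_struct *toy_get_vm_area(unsigned long size)
	{
		struct toy_vm_struct *area;

		size = PAGE_ALIGN(size);
		if (!size)
			return NULL;		/* zero-sized request rejected */

		area = malloc(sizeof(*area));
		if (!area)
			return NULL;

		area->size = size + PAGE_SIZE;	/* guard page, internal only */
		return area;
	}

	/* After the patch: strip the guard page before returning the area. */
	static struct toy_vm_struct *toy_remove_vm_area(struct toy_vm_struct *area)
	{
		area->size -= PAGE_SIZE;
		return area;
	}

	int main(void)
	{
		struct toy_vm_struct *area = toy_get_vm_area(3 * PAGE_SIZE);

		if (!area)
			return 1;

		toy_remove_vm_area(area);
		/*
		 * With the guard page hidden, size >> PAGE_SHIFT is 3, not 4,
		 * which is what a change_page_attr()-style caller needs.
		 */
		printf("pages passed to change_page_attr(): %lu\n",
		       area->size >> PAGE_SHIFT);
		free(area);
		return 0;
	}

In this sketch the guard page exists only between allocation and teardown of the toy
area, which mirrors why iounmap() no longer sees one page too many after the vmalloc.c
change above.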