From: Ingo Molnar 1. typos/spelling ;-) 2. removed prev_vma and find_vma_prev because the condition checked later was always true 3. moved the free_area_cache/mmap_base check into arch_unmap_area_topdown where I think it belongs. 4. removed the extra free_area_cache setting code in the while loop as it only has to be set when we actually (and successfully) return from this function. The only visible change to the layout should be the following: ---> direction of allocation ---> [1] [2] [3] XXXXXXXXXX.....XXXXXXXX.XXXXXXXXXXXXXXXX............. ^---{ mmap_top } ^------{ free_area_cache } if ->free_area_cache is pointing to [2] and if there's an allocation request with a size that doesn't fit into [2] but fits into [1] (and [3]) then the previous code would return [3], while the modified code restarts the search from ->mmap_top and thus returns [1]. It remains to be seen whether this allocation policy causes any performance regressions in applications that use thousands of separate vmas. (e.g. test_str02.c, or UML) One such regression scenario would be if there are lots of vmas chained between ->mmap_top and [2], and small holes appear regularly that cause ->free_area_cache failure. 
From: PaX Team Signed-off-by: Ingo Molnar Acked-By: Arjan van de Ven Signed-off-by: Andrew Morton --- 25-akpm/mm/mmap.c | 59 ++++++++++++++++++++---------------------------------- 1 files changed, 22 insertions(+), 37 deletions(-) diff -puN mm/mmap.c~simpler-topdown-mmap-layout-allocator mm/mmap.c --- 25/mm/mmap.c~simpler-topdown-mmap-layout-allocator 2005-01-22 18:29:18.853179744 -0800 +++ 25-akpm/mm/mmap.c 2005-01-22 18:29:18.858178984 -0800 @@ -1221,19 +1221,14 @@ arch_get_unmapped_area_topdown(struct fi const unsigned long len, const unsigned long pgoff, const unsigned long flags) { - struct vm_area_struct *vma, *prev_vma; + struct vm_area_struct *vma; struct mm_struct *mm = current->mm; - unsigned long base = mm->mmap_base, addr = addr0; - int first_time = 1; + unsigned long addr = addr0; /* requested length too big for entire address space */ if (len > TASK_SIZE) return -ENOMEM; - /* dont allow allocations above current base */ - if (mm->free_area_cache > base) - mm->free_area_cache = base; - /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); @@ -1243,48 +1238,34 @@ arch_get_unmapped_area_topdown(struct fi return addr; } -try_again: + /* either no address requested or can't fit in requested address hole */ + addr = mm->free_area_cache; + /* make sure it can fit in the remaining address space */ - if (mm->free_area_cache < len) - goto fail; + if (addr >= len) { + vma = find_vma(mm, addr-len); + if (!vma || addr <= vma->vm_start) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } + + addr = mm->mmap_base-len; - /* either no address requested or cant fit in requested address hole */ - addr = (mm->free_area_cache - len) & PAGE_MASK; do { /* * Lookup failure means no vma is above this address, - * i.e. 
return with success: + * else if new region fits below vma->vm_start, + * return with success: */ - if (!(vma = find_vma_prev(mm, addr, &prev_vma))) - return addr; - - /* - * new region fits between prev_vma->vm_end and - * vma->vm_start, use it: - */ - if (addr+len <= vma->vm_start && - (!prev_vma || (addr >= prev_vma->vm_end))) + vma = find_vma(mm, addr); + if (!vma || addr+len <= vma->vm_start) /* remember the address as a hint for next time */ return (mm->free_area_cache = addr); - else - /* pull free_area_cache down to the first hole */ - if (mm->free_area_cache == vma->vm_end) - mm->free_area_cache = vma->vm_start; /* try just below the current vma->vm_start */ addr = vma->vm_start-len; } while (len <= vma->vm_start); -fail: - /* - * if hint left us with no space for the requested - * mapping then try again: - */ - if (first_time) { - mm->free_area_cache = base; - first_time = 0; - goto try_again; - } /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario @@ -1296,7 +1277,7 @@ fail: /* * Restore the topdown base: */ - mm->free_area_cache = base; + mm->free_area_cache = mm->mmap_base; return addr; } @@ -1309,6 +1290,10 @@ void arch_unmap_area_topdown(struct vm_a */ if (area->vm_end > area->vm_mm->free_area_cache) area->vm_mm->free_area_cache = area->vm_end; + + /* dont allow allocations above current base */ + if (area->vm_mm->free_area_cache > area->vm_mm->mmap_base) + area->vm_mm->free_area_cache = area->vm_mm->mmap_base; } unsigned long _