diff -aurpN -X /home/fletch/.diff.exclude 390-slabtune/arch/i386/Kconfig 410-topdown/arch/i386/Kconfig
--- 390-slabtune/arch/i386/Kconfig	Wed Feb 11 10:14:55 2004
+++ 410-topdown/arch/i386/Kconfig	Wed Feb 11 10:15:39 2004
@@ -1617,6 +1617,15 @@ config KGDB_SYSRQ
 	  to interrupt the system before the serial port control C is
 	  available. Just say yes here.
 
+config MMAP_TOPDOWN
+	bool "Top-down vma allocation"
+	help
+	  Say Y here to have the kernel change its vma allocation policy
+	  to allocate vma's from the top of the address space down, and
+	  to shove the stack low so as to conserve virtualspace. This is
+	  risky because various apps, including a number of versions of
+	  ld.so, depend on the kernel's bottom-up behavior.
+
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	default KGDB
diff -aurpN -X /home/fletch/.diff.exclude 390-slabtune/arch/i386/mm/pgtable.c 410-topdown/arch/i386/mm/pgtable.c
--- 390-slabtune/arch/i386/mm/pgtable.c	Mon Feb 9 10:11:59 2004
+++ 410-topdown/arch/i386/mm/pgtable.c	Wed Feb 11 10:15:39 2004
@@ -320,3 +320,59 @@ out_free:
 	kmem_cache_free(pgd_cache, pgd);
 }
 
+#define GLIBC_BUFFER	(32*1024*1024)
+
+/*
+ * This is total crap; it needs to use the free area cache to mitigate
+ * catastrophic O(n) search with many vmas.
+ */
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+					unsigned long len, unsigned long pgoff,
+					unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma, *prev;
+
+	len = PAGE_ALIGN(len);
+	addr = PAGE_ALIGN(addr);
+
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	if (addr) {
+		struct vm_area_struct *vma;
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			goto out;
+	}
+
+	if (!mm->mmap) {
+		if (len > TASK_SIZE - GLIBC_BUFFER)
+			addr = TASK_SIZE - len;
+		else
+			addr = TASK_SIZE - GLIBC_BUFFER - len;
+		goto out;
+	}
+
+	addr = -ENOMEM;
+	for (prev = NULL, vma = mm->mmap; vma; prev = vma, vma = vma->vm_next) {
+		unsigned long lo, hi;
+		lo = prev ? prev->vm_end : 0;
+		hi = vma->vm_start;
+		if (hi - lo >= len && (addr == -ENOMEM || addr < hi - len))
+			addr = hi - len;
+	}
+	/*
+	 * We're at the last one; let's try the top, but only if nothing
+	 * else can be found (to respect GLIBC_BUFFER).
+	 */
+	if (prev && TASK_SIZE - prev->vm_end >= len) {
+		if (TASK_SIZE - GLIBC_BUFFER - prev->vm_end >= len)
+			addr = TASK_SIZE - GLIBC_BUFFER - len;
+		else if (addr == -ENOMEM)
+			addr = TASK_SIZE - len;
+	}
+out:
+	return addr;
+}
diff -aurpN -X /home/fletch/.diff.exclude 390-slabtune/fs/binfmt_elf.c 410-topdown/fs/binfmt_elf.c
--- 390-slabtune/fs/binfmt_elf.c	Wed Feb 11 10:14:22 2004
+++ 410-topdown/fs/binfmt_elf.c	Wed Feb 11 10:15:39 2004
@@ -7,6 +7,7 @@
  * Tools".
  *
  * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
+ * Top-down vma allocation support, William Irwin, IBM, 2003
  */
 
 #include <linux/module.h>
@@ -333,8 +334,13 @@ static unsigned long load_elf_interp(str
 	if (retval < 0)
 		goto out_close;
 
+#ifndef CONFIG_MMAP_TOPDOWN
 	eppnt = elf_phdata;
 	for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
+#else
+	eppnt = &elf_phdata[interp_elf_ex->e_phnum - 1];
+	for (i = interp_elf_ex->e_phnum - 1; i >= 0; --i, --eppnt) {
+#endif
 	  if (eppnt->p_type == PT_LOAD) {
 	    int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
 	    int elf_prot = 0;
@@ -348,7 +354,8 @@ static unsigned long load_elf_interp(str
 	    if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
 	    	elf_type |= MAP_FIXED;
 
-	    map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
+	    map_addr = load_addr_set ? load_addr + vaddr : 0;
+	    map_addr = elf_map(interpreter, map_addr, eppnt, elf_prot, elf_type);
 	    error = map_addr;
 	    if (BAD_ADDR(map_addr))
 	    	goto out_close;
diff -aurpN -X /home/fletch/.diff.exclude 390-slabtune/include/asm-i386/a.out.h 410-topdown/include/asm-i386/a.out.h
--- 390-slabtune/include/asm-i386/a.out.h	Sun Nov 17 20:29:32 2002
+++ 410-topdown/include/asm-i386/a.out.h	Wed Feb 11 10:15:39 2004
@@ -19,7 +19,16 @@ struct exec
 
 #ifdef __KERNEL__
 
+/*
+ * Typical ELF load address is 0x8048000, which is 128MB + 288KB.
+ * Shoving the stack very close to it lets smaller programs fit in
+ * a single pagetable page's worth of virtualspace.
+ */
+#ifdef CONFIG_MMAP_TOPDOWN
+#define STACK_TOP	((128 << 20) + (256 << 10))
+#else
 #define STACK_TOP	TASK_SIZE
+#endif
 
 #endif
 
diff -aurpN -X /home/fletch/.diff.exclude 390-slabtune/include/asm-i386/pgtable.h 410-topdown/include/asm-i386/pgtable.h
--- 390-slabtune/include/asm-i386/pgtable.h	Mon Feb 9 10:11:59 2004
+++ 410-topdown/include/asm-i386/pgtable.h	Wed Feb 11 10:15:39 2004
@@ -25,6 +25,10 @@
 #include
 #include
 
+#ifdef CONFIG_MMAP_TOPDOWN
+#define HAVE_ARCH_UNMAPPED_AREA
+#endif
+
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
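
Note (not part of the patch): the placement policy the new arch_get_unmapped_area() implements can be exercised outside the kernel. The sketch below is a minimal user-space simulation of the same gap walk over a toy vma list, assuming the stock i386 values TASK_SIZE = 3GB and 4KB pages; the names toy_vma, toy_get_unmapped_area, NO_ADDR and the harness in main() are invented for illustration only. It shows the first mapping of an empty mm landing GLIBC_BUFFER (32MB) below TASK_SIZE, and the next mapping packing in immediately below it.

#include <stdio.h>

#define TASK_SIZE	0xC0000000UL		/* i386 default: 3GB of user space */
#define GLIBC_BUFFER	(32UL*1024*1024)
#define PAGE_ALIGN(x)	(((x) + 4095UL) & ~4095UL)	/* assumes 4KB pages */
#define NO_ADDR		((unsigned long)-12)	/* stand-in for -ENOMEM */

struct toy_vma {
	unsigned long vm_start, vm_end;
	struct toy_vma *vm_next;		/* sorted by ascending address */
};

/* Same walk as the patch: remember the highest gap that fits, then prefer
 * to stay GLIBC_BUFFER below TASK_SIZE when the topmost gap is usable. */
static unsigned long toy_get_unmapped_area(struct toy_vma *mmap_list,
					   unsigned long len)
{
	struct toy_vma *vma, *prev;
	unsigned long addr;

	len = PAGE_ALIGN(len);
	if (len > TASK_SIZE)
		return NO_ADDR;

	if (!mmap_list) {
		if (len > TASK_SIZE - GLIBC_BUFFER)
			return TASK_SIZE - len;
		return TASK_SIZE - GLIBC_BUFFER - len;
	}

	addr = NO_ADDR;
	for (prev = NULL, vma = mmap_list; vma; prev = vma, vma = vma->vm_next) {
		unsigned long lo = prev ? prev->vm_end : 0;
		unsigned long hi = vma->vm_start;
		if (hi - lo >= len && (addr == NO_ADDR || addr < hi - len))
			addr = hi - len;
	}
	if (prev && TASK_SIZE - prev->vm_end >= len) {
		if (TASK_SIZE - GLIBC_BUFFER - prev->vm_end >= len)
			addr = TASK_SIZE - GLIBC_BUFFER - len;
		else if (addr == NO_ADDR)
			addr = TASK_SIZE - len;
	}
	return addr;
}

int main(void)
{
	struct toy_vma first;

	/* empty mm: the first mapping lands GLIBC_BUFFER below TASK_SIZE */
	first.vm_start = toy_get_unmapped_area(NULL, 1 << 20);
	first.vm_end = first.vm_start + (1 << 20);
	first.vm_next = NULL;
	printf("first  1MB mapping at %#lx\n", first.vm_start);	/* 0xbdf00000 */

	/* with that vma in place, the next mapping packs in just below it */
	printf("second 1MB mapping at %#lx\n",
	       toy_get_unmapped_area(&first, 1 << 20));		/* 0xbde00000 */
	return 0;
}

The STACK_TOP arithmetic in the a.out.h hunk works the same way: 0x8048000 is 128MB + 288KB, so a stack top of (128 << 20) + (256 << 10) = 0x8040000 sits 32KB below the usual ELF text start, keeping the stack and a small program's text within the same 4MB pagetable-page region.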