diff -urN x86-64-ref/arch/x86_64/config.in x86-64/arch/x86_64/config.in
--- x86-64-ref/arch/x86_64/config.in	Fri Mar 29 17:40:58 2002
+++ x86-64/arch/x86_64/config.in	Fri Mar 29 17:41:46 2002
@@ -11,8 +11,6 @@
 define_bool CONFIG_SBUS n
 
 define_bool CONFIG_UID16 y
 
-define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y
-define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n
 
 define_bool CONFIG_X86_CMPXCHG y
diff -urN x86-64-ref/arch/x86_64/kernel/process.c x86-64/arch/x86_64/kernel/process.c
--- x86-64-ref/arch/x86_64/kernel/process.c	Fri Mar 29 17:40:58 2002
+++ x86-64/arch/x86_64/kernel/process.c	Fri Mar 29 17:41:46 2002
@@ -131,7 +131,7 @@
 	/* endless idle loop with no priority at all */
 	init_idle();
 	current->nice = 20;
-	current->counter = -100;
+	current->dyn_prio = -100;
 
 	while (1) {
 		void (*idle)(void) = pm_idle;
diff -urN x86-64-ref/arch/x86_64/mm/fault.c x86-64/arch/x86_64/mm/fault.c
--- x86-64-ref/arch/x86_64/mm/fault.c	Fri Mar 29 17:40:58 2002
+++ x86-64/arch/x86_64/mm/fault.c	Fri Mar 29 17:41:46 2002
@@ -100,7 +100,7 @@
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
-	struct vm_area_struct * vma;
+	struct vm_area_struct * vma, * prev_vma;
 	unsigned long address;
 	unsigned long fixup;
 	int write;
@@ -158,7 +158,8 @@
 		if (address + 128 < regs->rsp)
 			goto bad_area;
 	}
-	if (expand_stack(vma, address))
+	find_vma_prev(mm, address, &prev_vma);
+	if (expand_stack(vma, address, prev_vma))
 		goto bad_area;
 	/*
	 * Ok, we have a good vm_area for this memory access, so
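
[Note on the fault.c hunks above: expand_stack() now also takes the VMA
preceding the faulting address, fetched with find_vma_prev(). A minimal
sketch of the kind of check this enables -- the struct fields are real,
but the function body and the one-page guard gap are illustrative
assumptions, not the actual generic mm code:]

	#include <errno.h>

	#define PAGE_SIZE 4096UL
	#define PAGE_MASK (~(PAGE_SIZE - 1))

	/* cut-down stand-in for the kernel's vm_area_struct */
	struct vm_area_struct {
		unsigned long vm_start;	/* lowest address in the mapping */
		unsigned long vm_end;	/* first address past the mapping */
	};

	/* grow the stack VMA down to cover "address", refusing if that
	 * would bump into the mapping just below it */
	static int expand_stack_sketch(struct vm_area_struct *vma,
				       unsigned long address,
				       struct vm_area_struct *prev_vma)
	{
		address &= PAGE_MASK;
		if (prev_vma && address < prev_vma->vm_end + PAGE_SIZE)
			return -ENOMEM;	/* too close to the VMA below */
		vma->vm_start = address;
		return 0;
	}

[When there is no mapping below, find_vma_prev() leaves prev_vma NULL
and the growth is always allowed, as before.]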
diff -urN x86-64-ref/arch/x86_64/tools/offset.c x86-64/arch/x86_64/tools/offset.c
--- x86-64-ref/arch/x86_64/tools/offset.c	Fri Mar 29 17:40:58 2002
+++ x86-64/arch/x86_64/tools/offset.c	Fri Mar 29 17:41:46 2002
@@ -10,7 +10,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 
 #define output(x) asm volatile ("--- " x)
diff -urN x86-64-ref/include/asm-x86_64/fcntl.h x86-64/include/asm-x86_64/fcntl.h
--- x86-64-ref/include/asm-x86_64/fcntl.h	Fri Mar 29 17:40:58 2002
+++ x86-64/include/asm-x86_64/fcntl.h	Fri Mar 29 17:41:46 2002
@@ -20,6 +20,7 @@
 #define O_LARGEFILE	0100000
 #define O_DIRECTORY	0200000	/* must be a directory */
 #define O_NOFOLLOW	0400000 /* don't follow links */
+#define O_ATOMICLOOKUP	01000000 /* do atomic file lookup */
 
 #define F_DUPFD		0	/* dup */
 #define F_GETFD		1	/* get close_on_exec */
diff -urN x86-64-ref/include/asm-x86_64/prefetch.h x86-64/include/asm-x86_64/prefetch.h
--- x86-64-ref/include/asm-x86_64/prefetch.h	Thu Jan  1 01:00:00 1970
+++ x86-64/include/asm-x86_64/prefetch.h	Fri Mar 29 17:43:38 2002
@@ -0,0 +1,12 @@
+#ifndef __ASM_X86_64_PREFETCH_H
+#define __ASM_X86_64_PREFETCH_H
+
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+#define prefetch(x) __builtin_prefetch((x),0)
+#define prefetchw(x) __builtin_prefetch((x),1)
+#define spin_lock_prefetch(x) prefetchw(x)
+
+#endif /* __ASM_X86_64_PREFETCH_H */
diff -urN x86-64-ref/include/asm-x86_64/processor.h x86-64/include/asm-x86_64/processor.h
--- x86-64-ref/include/asm-x86_64/processor.h	Fri Mar 29 17:40:58 2002
+++ x86-64/include/asm-x86_64/processor.h	Fri Mar 29 17:52:22 2002
@@ -416,13 +416,6 @@
 
 #define cpu_has_fpu 1
 
-#define ARCH_HAS_PREFETCH
-#define ARCH_HAS_PREFETCHW
-#define ARCH_HAS_SPINLOCK_PREFETCH
-
-#define prefetch(x) __builtin_prefetch((x),0)
-#define prefetchw(x) __builtin_prefetch((x),1)
-#define spin_lock_prefetch(x) prefetchw(x)
 
 #define cpu_relax() rep_nop()
 
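
[Note on the two hunks above: the prefetch macros move out of
processor.h into a header of their own. They expand to GCC's
__builtin_prefetch, whose second argument is 0 for a read and 1 for a
write-intent prefetch. A self-contained sketch of the usual idiom --
pulling in the next list node while the current one is processed, much
as 2.4's list_for_each() does; the node type and function here are made
up for illustration:]

	#include <stddef.h>

	#define prefetch(x)  __builtin_prefetch((x), 0)
	#define prefetchw(x) __builtin_prefetch((x), 1)

	struct node {
		struct node *next;
		long payload;
	};

	static long sum_list(struct node *head)
	{
		long sum = 0;
		struct node *n;

		for (n = head; n != NULL; n = n->next) {
			/* start fetching the next node now; a prefetch of
			 * a NULL/invalid address is defined not to fault */
			prefetch(n->next);
			sum += n->payload;
		}
		return sum;
	}

[spin_lock_prefetch() maps to prefetchw(), since taking the lock will
write the cache line anyway.]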
diff -urN x86-64-ref/include/asm-x86_64/rwsem.h x86-64/include/asm-x86_64/rwsem.h
--- x86-64-ref/include/asm-x86_64/rwsem.h	Fri Mar 29 17:40:58 2002
+++ x86-64/include/asm-x86_64/rwsem.h	Thu Jan  1 01:00:00 1970
@@ -1,217 +0,0 @@
-/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for x86_64+
- *
- * Written by David Howells (dhowells@redhat.com).
- * Ported by Andi Kleen to x86-64.
- *
- * Derived from asm-i386/semaphore.h and asm-i386/rwsem.h
- *
- *
- * The MSW of the count is the negated number of active writers and waiting
- * lockers, and the LSW is the total number of active locks
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
- * uncontended lock. This can be determined because XADD returns the old value.
- * Readers increment by 1 and see a positive value when uncontended, negative
- * if there are writers (and maybe) readers waiting (in which case it goes to
- * sleep).
- *
- * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
- * be extended to 65534 by manually checking the whole MSW rather than relying
- * on the S flag.
- *
- * The value of ACTIVE_BIAS supports up to 65535 active processes.
- *
- * This should be totally fair - if anything is waiting, a process that wants a
- * lock will go to the back of the queue. When the currently active lock is
- * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consequtive readers at the
- * front, then they'll all be woken up, but no other readers will be.
- */
-
-#ifndef _X8664_RWSEM_H
-#define _X8664_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error please dont include asm/rwsem.h directly, use linux/rwsem.h instead
-#endif
-
-#ifdef __KERNEL__
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
-struct rwsem_waiter;
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-	signed int count;
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-	spinlock_t wait_lock;
-	struct list_head wait_list;
-#if RWSEM_DEBUG
-	int debug;
-#endif
-};
-
-/*
- * initialisation
- */
-#if RWSEM_DEBUG
-#define __RWSEM_DEBUG_INIT , 0
-#else
-#define __RWSEM_DEBUG_INIT /* */
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
-	__RWSEM_DEBUG_INIT }
-
-#define DECLARE_RWSEM(name) \
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
-	INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
-	sem->debug = 0;
-#endif
-}
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	__asm__ __volatile__(
-		"# beginning down_read\n\t"
-LOCK_PREFIX	" incl (%%rdi)\n\t" /* adds 0x00000001, returns the old value */
-		" js 2f\n\t" /* jump if we weren't granted the lock */
-		"1:\n\t"
-		LOCK_SECTION_START("") \
-		"2:\n\t"
-		" call rwsem_down_read_failed_thunk\n\t"
-		" jmp 1b\n"
-		LOCK_SECTION_END \
-		"# ending down_read\n\t"
-		: "+m"(sem->count)
-		: "D"(sem)
-		: "memory", "cc");
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	tmp = RWSEM_ACTIVE_WRITE_BIAS;
-	__asm__ __volatile__(
-		"# beginning down_write\n\t"
-LOCK_PREFIX	" xaddl %0,(%%rdi)\n\t" /* subtract 0x0000ffff, returns the old value */
-		" testl %0,%0\n\t" /* was the count 0 before? */
-		" jnz 2f\n\t" /* jump if we weren't granted the lock */
-		"1:\n\t"
-		LOCK_SECTION_START("")
-		"2:\n\t"
-		" call rwsem_down_write_failed_thunk\n\t"
-		" jmp 1b\n"
-		LOCK_SECTION_END
-		"# ending down_write"
-		: "=&r" (tmp)
-		: "0"(tmp), "D"(sem)
-		: "memory", "cc");
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
-	__asm__ __volatile__(
-		"# beginning __up_read\n\t"
-LOCK_PREFIX	" xaddl %%edx,(%%rdi)\n\t" /* subtracts 1, returns the old value */
-		" js 2f\n\t" /* jump if the lock is being waited upon */
-		"1:\n\t"
-		LOCK_SECTION_START("")
-		"2:\n\t"
-		" decw %%dx\n\t" /* do nothing if still outstanding active readers */
-		" jnz 1b\n\t"
-		" call rwsem_wake_thunk\n\t"
-		" jmp 1b\n"
-		LOCK_SECTION_END
-		"# ending __up_read\n"
-		: "+m"(sem->count), "+d"(tmp)
-		: "D"(sem)
-		: "memory", "cc");
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	__asm__ __volatile__(
-		"# beginning __up_write\n\t"
-		" movl %2,%%edx\n\t"
-LOCK_PREFIX	" xaddl %%edx,(%%rdi)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
-		" jnz 2f\n\t" /* jump if the lock is being waited upon */
-		"1:\n\t"
-		LOCK_SECTION_START("")
-		"2:\n\t"
-		" decw %%dx\n\t" /* did the active count reduce to 0? */
-		" jnz 1b\n\t" /* jump back if not */
-		" call rwsem_wake_thunk\n\t"
-		" jmp 1b\n"
-		LOCK_SECTION_END
-		"# ending __up_write\n"
-		: "+m"(sem->count)
-		: "D"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS)
-		: "memory", "cc", "rdx");
-}
-
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
-{
-	__asm__ __volatile__(
-LOCK_PREFIX	"addl %1,%0"
-		:"=m"(sem->count)
-		:"ir"(delta), "m"(sem->count));
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
-	int tmp = delta;
-
-	__asm__ __volatile__(
-LOCK_PREFIX	"xaddl %0,(%2)"
-		: "=r"(tmp), "=m"(sem->count)
-		: "r"(sem), "m"(sem->count), "0" (tmp)
-		: "memory");
-
-	return tmp+delta;
-}
-
-#endif /* __KERNEL__ */
-#endif /* _X8664_RWSEM_H */
diff -urN x86-64-ref/include/asm-x86_64/timex.h x86-64/include/asm-x86_64/timex.h
--- x86-64-ref/include/asm-x86_64/timex.h	Fri Mar 29 17:40:58 2002
+++ x86-64/include/asm-x86_64/timex.h	Fri Mar 29 17:41:46 2002
@@ -48,4 +48,8 @@
 
 extern unsigned int cpu_khz;
 
+typedef cycles_t last_schedule_t;
+#define get_last_schedule() ({ get_cycles(); })
+#define last_schedule_before(a, b) ({ a < b; })
+
 #endif
diff -urN x86-64-ref/include/asm-x86_64/unistd.h x86-64/include/asm-x86_64/unistd.h
--- x86-64-ref/include/asm-x86_64/unistd.h	Fri Mar 29 17:40:58 2002
+++ x86-64/include/asm-x86_64/unistd.h	Fri Mar 29 17:41:46 2002
@@ -592,7 +592,7 @@
 	return sys_setsid();
 }
 
-extern ssize_t sys_write(unsigned int, char *, size_t);
+extern asmlinkage ssize_t sys_write(unsigned int fd, const char * buf, size_t count);
 static inline ssize_t write(unsigned int fd, char * buf, size_t count)
 {
 	return sys_write(fd, buf, count);
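
[Note on the timex.h hunk above: the additions give the scheduler an
opaque type for "when did this CPU last run schedule()" stamps, plus
hooks to read and compare them. A user-space sketch of the comparison
pattern; get_cycles(), NR_CPUS and the per-CPU array are stand-ins for
illustration only:]

	#include <stdio.h>

	typedef unsigned long long cycles_t;

	/* stand-in for the kernel's get_cycles(): read the TSC */
	static inline cycles_t get_cycles(void)
	{
		unsigned int lo, hi;
		__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
		return ((cycles_t)hi << 32) | lo;
	}

	/* the abstraction added by the patch */
	typedef cycles_t last_schedule_t;
	#define get_last_schedule() ({ get_cycles(); })
	#define last_schedule_before(a, b) ({ a < b; })

	#define NR_CPUS 4
	static last_schedule_t last_sched[NR_CPUS];

	/* pick the CPU that has gone longest without scheduling */
	static int oldest_cpu(void)
	{
		int cpu, best = 0;

		for (cpu = 1; cpu < NR_CPUS; cpu++)
			if (last_schedule_before(last_sched[cpu],
						 last_sched[best]))
				best = cpu;
		return best;
	}

	int main(void)
	{
		int cpu;

		for (cpu = 0; cpu < NR_CPUS; cpu++)
			last_sched[cpu] = get_last_schedule();
		printf("oldest CPU: %d\n", oldest_cpu());
		return 0;
	}

[On x86-64 the stamps are TSC cycles; an architecture without a usable
cycle counter could typedef last_schedule_t to something else, such as
jiffies -- presumably the point of hiding the comparison behind
last_schedule_before().]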