diff -urN ref/include/asm-x86_64/pgtable.h x86-64/include/asm-x86_64/pgtable.h
--- ref/include/asm-x86_64/pgtable.h	Tue Mar 12 08:59:28 2002
+++ x86-64/include/asm-x86_64/pgtable.h	Tue Mar 12 08:54:13 2002
@@ -285,11 +285,6 @@
 #define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 #define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE )
 
-/*
- * Permanent address of a page. Obviously must never be
- * called on a highmem page.
- */
-#define page_address(page) ((page)->virtual)
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))	/* FIXME: is this right? */
 
 #define pte_page(x)	(mem_map+((unsigned long)((pte_val(x) >> PAGE_SHIFT))))
diff -urN ref/include/asm-x86_64/prefetch.h x86-64/include/asm-x86_64/prefetch.h
--- ref/include/asm-x86_64/prefetch.h	Thu Jan  1 01:00:00 1970
+++ x86-64/include/asm-x86_64/prefetch.h	Tue Mar 12 08:47:17 2002
@@ -0,0 +1,22 @@
+#ifndef __ASM_X86_64_PREFETCH_H
+#define __ASM_X86_64_PREFETCH_H
+
+/* 3d now! prefetch instructions. Could also use the SSE flavours; not sure
+   if it makes a difference */
+
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+extern inline void prefetch(const void *x)
+{
+	__asm__ __volatile__ ("prefetch (%0)" : : "r"(x));
+}
+
+extern inline void prefetchw(const void *x)
+{
+	__asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
+}
+#define spin_lock_prefetch(x)	prefetchw(x)
+
+#endif /* __ASM_X86_64_PREFETCH_H */
diff -urN ref/include/asm-x86_64/processor.h x86-64/include/asm-x86_64/processor.h
--- ref/include/asm-x86_64/processor.h	Tue Mar 12 08:59:28 2002
+++ x86-64/include/asm-x86_64/processor.h	Tue Mar 12 08:53:49 2002
@@ -445,23 +445,6 @@
 
 #define cpu_has_fpu 1
 
-/* 3d now! prefetch instructions. Could also use the SSE flavours; not sure
-   if it makes a difference */
-
-#define ARCH_HAS_PREFETCH
-#define ARCH_HAS_PREFETCHW
-#define ARCH_HAS_SPINLOCK_PREFETCH
-
-extern inline void prefetch(const void *x)
-{
-	__asm__ __volatile__ ("prefetch (%0)" : : "r"(x));
-}
-
-extern inline void prefetchw(const void *x)
-{
-	__asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
-}
-#define spin_lock_prefetch(x)	prefetchw(x)
 
 #define cpu_relax()	rep_nop()
 
diff -urN ref/include/asm-x86_64/rwsem.h x86-64/include/asm-x86_64/rwsem.h
--- ref/include/asm-x86_64/rwsem.h	Tue Mar 12 08:59:28 2002
+++ x86-64/include/asm-x86_64/rwsem.h	Thu Jan  1 01:00:00 1970
@@ -1,217 +0,0 @@
-/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for x86_64+
- *
- * Written by David Howells (dhowells@redhat.com).
- * Ported by Andi Kleen to x86-64.
- *
- * Derived from asm-i386/semaphore.h and asm-i386/rwsem.h
- *
- *
- * The MSW of the count is the negated number of active writers and waiting
- * lockers, and the LSW is the total number of active locks
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
- * uncontended lock. This can be determined because XADD returns the old value.
- * Readers increment by 1 and see a positive value when uncontended, negative
- * if there are writers (and maybe) readers waiting (in which case it goes to
- * sleep).
- *
- * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
- * be extended to 65534 by manually checking the whole MSW rather than relying
- * on the S flag.
- *
- * The value of ACTIVE_BIAS supports up to 65535 active processes.
- *
- * This should be totally fair - if anything is waiting, a process that wants a
- * lock will go to the back of the queue. When the currently active lock is
- * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consequtive readers at the
- * front, then they'll all be woken up, but no other readers will be.
- */
-
-#ifndef _X8664_RWSEM_H
-#define _X8664_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error please dont include asm/rwsem.h directly, use linux/rwsem.h instead
-#endif
-
-#ifdef __KERNEL__
-
-#include
-#include
-
-struct rwsem_waiter;
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-	signed long		count;
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-	spinlock_t		wait_lock;
-	struct list_head	wait_list;
-#if RWSEM_DEBUG
-	int			debug;
-#endif
-};
-
-/*
- * initialisation
- */
-#if RWSEM_DEBUG
-#define __RWSEM_DEBUG_INIT	, 0
-#else
-#define __RWSEM_DEBUG_INIT	/* */
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
-	__RWSEM_DEBUG_INIT }
-
-#define DECLARE_RWSEM(name) \
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
-	INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
-	sem->debug = 0;
-#endif
-}
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	__asm__ __volatile__(
-		"# beginning down_read\n\t"
-LOCK_PREFIX	" incl (%%rax)\n\t" /* adds 0x00000001, returns the old value */
-		" js 2f\n\t" /* jump if we weren't granted the lock */
-		"1:\n\t"
-		".section .text.lock,\"ax\"\n"
-		"2:\n\t"
-		" call rwsem_down_read_failed_thunk\n\t"
-		" jmp 1b\n"
-		".previous"
-		"# ending down_read\n\t"
-		: "+m"(sem->count)
-		: "a"(sem)
-		: "memory", "cc");
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	tmp = RWSEM_ACTIVE_WRITE_BIAS;
-	__asm__ __volatile__(
-		"# beginning down_write\n\t"
-LOCK_PREFIX	" xadd %0,(%%rax)\n\t" /* subtract 0x0000ffff, returns the old value */
-		" testl %0,%0\n\t" /* was the count 0 before? */
-		" jnz 2f\n\t" /* jump if we weren't granted the lock */
-		"1:\n\t"
-		".section .text.lock,\"ax\"\n"
-		"2:\n\t"
-		" call rwsem_down_write_failed_thunk\n\t"
-		" jmp 1b\n"
-		".previous\n"
-		"# ending down_write"
-		: "=r" (tmp)
-		: "0"(tmp), "a"(sem)
-		: "memory", "cc");
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
-	__asm__ __volatile__(
-		"# beginning __up_read\n\t"
-LOCK_PREFIX	" xadd %%edx,(%%rax)\n\t" /* subtracts 1, returns the old value */
-		" js 2f\n\t" /* jump if the lock is being waited upon */
-		"1:\n\t"
-		".section .text.lock,\"ax\"\n"
-		"2:\n\t"
-		" decw %%dx\n\t" /* do nothing if still outstanding active readers */
-		" jnz 1b\n\t"
-		" call rwsem_wake_thunk\n\t"
-		" jmp 1b\n"
-		".previous\n"
-		"# ending __up_read\n"
-		: "+m"(sem->count), "+d"(tmp)
-		: "a"(sem)
-		: "memory", "cc");
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	__asm__ __volatile__(
-		"# beginning __up_write\n\t"
-		" movl %2,%%edx\n\t"
-LOCK_PREFIX	" xaddl %%edx,(%%rax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
-		" jnz 2f\n\t" /* jump if the lock is being waited upon */
-		"1:\n\t"
-		".section .text.lock,\"ax\"\n"
-		"2:\n\t"
-		" decw %%dx\n\t" /* did the active count reduce to 0? */
-		" jnz 1b\n\t" /* jump back if not */
-		" call rwsem_wake_thunk\n\t"
-		" jmp 1b\n"
-		".previous\n"
-		"# ending __up_write\n"
-		: "+m"(sem->count)
-		: "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS)
-		: "memory", "cc", "edx");
-}
-
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
-{
-	__asm__ __volatile__(
-LOCK_PREFIX	"addl %1,%0"
-		:"=m"(sem->count)
-		:"ir"(delta), "m"(sem->count));
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
-	int tmp = delta;
-
-	__asm__ __volatile__(
-LOCK_PREFIX	"xadd %0,(%2)"
-		: "=r"(tmp), "=m"(sem->count)
-		: "r"(sem), "m"(sem->count), "0" (tmp)
-		: "memory");
-
-	return tmp+delta;
-}
-
-#endif /* __KERNEL__ */
-#endif /* _I386_RWSEM_H */
diff -urN ref/include/asm-x86_64/siginfo.h x86-64/include/asm-x86_64/siginfo.h
--- ref/include/asm-x86_64/siginfo.h	Tue Mar 12 08:59:28 2002
+++ x86-64/include/asm-x86_64/siginfo.h	Tue Mar 12 08:55:28 2002
@@ -106,6 +106,7 @@
 #define SI_MESGQ	-3	/* sent by real time mesq state change */
 #define SI_ASYNCIO	-4	/* sent by AIO completion */
 #define SI_SIGIO	-5	/* sent by queued SIGIO */
+#define SI_TKILL	-6	/* sent by tkill system call */
 
 #define SI_FROMUSER(siptr)	((siptr)->si_code <= 0)
 #define SI_FROMKERNEL(siptr)	((siptr)->si_code > 0)
diff -urN ref/include/asm-x86_64/unistd.h x86-64/include/asm-x86_64/unistd.h
--- ref/include/asm-x86_64/unistd.h	Tue Mar 12 08:59:29 2002
+++ x86-64/include/asm-x86_64/unistd.h	Tue Mar 12 08:53:47 2002
@@ -564,7 +564,7 @@
 	return sys_setsid();
 }
 
-extern ssize_t sys_write(unsigned int, char *, size_t);
+extern asmlinkage ssize_t sys_write(unsigned int fd, const char * buf, size_t count);
 static inline ssize_t write(unsigned int fd, char * buf, size_t count)
 {
 	return sys_write(fd, buf, count);
diff -urN x86-64-ref/arch/x86_64/config.in x86-64/arch/x86_64/config.in
--- x86-64-ref/arch/x86_64/config.in	Tue Mar 12 10:37:36 2002
+++ x86-64/arch/x86_64/config.in	Tue Mar 12 09:31:19 2002
@@ -11,8 +11,6 @@
 define_bool CONFIG_SBUS n
 
 define_bool CONFIG_UID16 y
-define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y
-define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n
 define_bool CONFIG_X86_CMPXCHG y
 
diff -urN x86-64-ref/arch/x86_64/kernel/process.c x86-64/arch/x86_64/kernel/process.c
--- x86-64-ref/arch/x86_64/kernel/process.c	Tue Mar 12 10:37:36 2002
+++ x86-64/arch/x86_64/kernel/process.c	Tue Mar 12 09:56:49 2002
@@ -130,7 +130,7 @@
 	/* endless idle loop with no priority at all */
 	init_idle();
 	current->nice = 20;
-	current->counter = -100;
+	current->dyn_prio = -100;
 
 	while (1) {
 		void (*idle)(void) = pm_idle;
diff -urN x86-64-ref/arch/x86_64/kernel/setup64.c x86-64/arch/x86_64/kernel/setup64.c
--- x86-64-ref/arch/x86_64/kernel/setup64.c	Tue Mar 12 10:37:36 2002
+++ x86-64/arch/x86_64/kernel/setup64.c	Tue Mar 12 10:01:09 2002
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff -urN x86-64-ref/arch/x86_64/kernel/time.c x86-64/arch/x86_64/kernel/time.c
--- x86-64-ref/arch/x86_64/kernel/time.c	Tue Mar 12 10:37:36 2002
+++ x86-64/arch/x86_64/kernel/time.c	Tue Mar 12 10:00:15 2002
@@ -630,7 +630,6 @@
 #ifndef do_gettimeoffset
 	do_gettimeoffset = do_fast_gettimeoffset;
 #endif
-	do_get_fast_time = do_gettimeofday;
 
 	/* report CPU clock rate in Hz.
 	 * The formula is (10^6 * 2^32) / (2^32 * 1 / (clocks/us)) =
diff -urN x86-64-ref/arch/x86_64/mm/fault.c x86-64/arch/x86_64/mm/fault.c
--- x86-64-ref/arch/x86_64/mm/fault.c	Tue Mar 12 10:37:36 2002
+++ x86-64/arch/x86_64/mm/fault.c	Tue Mar 12 10:10:36 2002
@@ -96,7 +96,7 @@
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
-	struct vm_area_struct * vma;
+	struct vm_area_struct * vma, * prev_vma;
 	unsigned long address;
 	unsigned long fixup;
 	int write;
@@ -152,7 +152,8 @@
 		if (address + 128 < regs->rsp)
 			goto bad_area;
 	}
-	if (expand_stack(vma, address))
+	find_vma_prev(mm, address, &prev_vma);
+	if (expand_stack(vma, address, prev_vma))
 		goto bad_area;
 	/*
 	 * Ok, we have a good vm_area for this memory access, so
diff -urN x86-64-ref/arch/x86_64/tools/offset.c x86-64/arch/x86_64/tools/offset.c
--- x86-64-ref/arch/x86_64/tools/offset.c	Tue Mar 12 10:37:36 2002
+++ x86-64/arch/x86_64/tools/offset.c	Tue Mar 12 09:32:41 2002
@@ -10,7 +10,6 @@
 #include
 #include
 #include
-#include
 #include
 
 #define output(x) asm volatile ("--- " x)
diff -urN x86-64-ref/include/asm-x86_64/fcntl.h x86-64/include/asm-x86_64/fcntl.h
--- x86-64-ref/include/asm-x86_64/fcntl.h	Tue Mar 12 10:37:36 2002
+++ x86-64/include/asm-x86_64/fcntl.h	Tue Mar 12 09:50:11 2002
@@ -20,6 +20,7 @@
 #define O_LARGEFILE	0100000
 #define O_DIRECTORY	0200000	/* must be a directory */
 #define O_NOFOLLOW	0400000 /* don't follow links */
+#define O_ATOMICLOOKUP	01000000 /* do atomic file lookup */
 
 #define F_DUPFD		0	/* dup */
 #define F_GETFD		1	/* get close_on_exec */
diff -urN x86-64-ref/include/asm-x86_64/page.h x86-64/include/asm-x86_64/page.h
--- x86-64-ref/include/asm-x86_64/page.h	Tue Mar 12 10:37:36 2002
+++ x86-64/include/asm-x86_64/page.h	Tue Mar 12 09:43:53 2002
@@ -127,6 +127,8 @@
 #define virt_to_page(kaddr)	(mem_map + (__pa(kaddr) >> PAGE_SHIFT))
 #define VALID_PAGE(page)	((page - mem_map) < max_mapnr)
 
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+			       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #endif /* __KERNEL__ */
 
diff -urN x86-64-ref/include/asm-x86_64/pgtable.h x86-64/include/asm-x86_64/pgtable.h
--- x86-64-ref/include/asm-x86_64/pgtable.h	Tue Mar 12 10:37:36 2002
+++ x86-64/include/asm-x86_64/pgtable.h	Tue Mar 12 09:45:04 2002
@@ -35,6 +35,7 @@
 #define flush_dcache_page(page)		do { } while (0)
 #define flush_icache_range(start, end)	do { } while (0)
 #define flush_icache_page(vma,pg)	do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 
 #define __flush_tlb()							\
 	do {								\
diff -urN x86-64-ref/include/asm-x86_64/timex.h x86-64/include/asm-x86_64/timex.h
--- x86-64-ref/include/asm-x86_64/timex.h	Tue Mar 12 10:37:36 2002
+++ x86-64/include/asm-x86_64/timex.h	Tue Mar 12 08:53:49 2002
@@ -48,4 +48,8 @@
 
 extern unsigned int cpu_khz;
 
+typedef cycles_t last_schedule_t;
+#define get_last_schedule() ({ get_cycles(); })
+#define last_schedule_before(a, b) ({ a < b; })
+
 #endif
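A side note on the new asm-x86_64/prefetch.h: the ARCH_HAS_PREFETCH / ARCH_HAS_PREFETCHW /
ARCH_HAS_SPINLOCK_PREFETCH defines are not used by the x86-64 code itself; they only tell the
architecture-independent prefetch header that real implementations exist, so it can skip its
no-op fallbacks. Roughly the pattern looks like this (a from-memory sketch of the generic
fallback, not the literal linux/prefetch.h of this tree):

	#ifndef ARCH_HAS_PREFETCH
	static inline void prefetch(const void *x) { ; }	/* no prefetch insn: compile to nothing */
	#endif

	#ifndef ARCH_HAS_PREFETCHW
	static inline void prefetchw(const void *x) { ; }	/* same for the write-intent variant */
	#endif

	#ifndef ARCH_HAS_SPINLOCK_PREFETCH
	#define spin_lock_prefetch(x) prefetchw(x)
	#endif

Moving the x86-64 versions out of processor.h into their own header leaves the prefetch helpers
includable on their own.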
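And since the whole asm-x86_64/rwsem.h goes away here, a quick illustration of the bias
arithmetic its header comment describes; this is a stand-alone user-space demo (illustrative
only, constants copied from the deleted file), not kernel code:

	#include <stdio.h>

	#define RWSEM_UNLOCKED_VALUE	0x00000000
	#define RWSEM_ACTIVE_BIAS	0x00000001
	#define RWSEM_WAITING_BIAS	(-0x00010000)
	#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
	#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

	int main(void)
	{
		int count;

		/* uncontended writer: the xadd leaves 0xffff0001 in the count
		   (MSW = -1 writer/waiter, LSW = 1 active lock) */
		count = RWSEM_UNLOCKED_VALUE + RWSEM_ACTIVE_WRITE_BIAS;
		printf("after down_write: %08x\n", (unsigned int)count);

		/* uncontended reader: the count goes from 0 to 1 and stays positive,
		   so the "js" in __down_read does not take the slow path */
		count = RWSEM_UNLOCKED_VALUE + RWSEM_ACTIVE_READ_BIAS;
		printf("after down_read:  %08x\n", (unsigned int)count);
		return 0;
	}

This prints ffff0001 and 00000001, matching the "writer ... will get 0xffff0001 for the case of
an uncontended lock" note in the removed comment.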