From: Ian Molton

Replace the macros for atomic ops and bring arm26 up to the current
definitions.  Make the arm26 mm code use the proper macros as well.

Signed-off-by: Andrew Morton
---

 25-akpm/arch/arm26/mm/init.c       |    5 -
 25-akpm/include/asm-arm26/atomic.h |  109 +++++++++++-------------------------
 25-akpm/include/asm-arm26/bitops.h |   73 +++++++++---------------
 3 files changed, 63 insertions(+), 124 deletions(-)

diff -puN arch/arm26/mm/init.c~arm26-update-the-atomic-ops arch/arm26/mm/init.c
--- 25/arch/arm26/mm/init.c~arm26-update-the-atomic-ops	Thu Dec 23 14:22:01 2004
+++ 25-akpm/arch/arm26/mm/init.c	Thu Dec 23 14:22:01 2004
@@ -1,5 +1,5 @@
 /*
- *  linux/arch/arm/mm/init.c
+ *  linux/arch/arm26/mm/init.c
  *
  *  Copyright (C) 1995-2002 Russell King
  *
@@ -26,7 +26,6 @@
 #include
 #include
-#include
 #include
 #include
 #include

@@ -84,7 +83,7 @@ void show_mem(void)
 		else if (!page_count(page))
 			free++;
 		else
-			shared += atomic_read(&page->count) - 1;
+			shared += page_count(page) - 1;
 		page++;
 	} while (page < end);

diff -puN include/asm-arm26/atomic.h~arm26-update-the-atomic-ops include/asm-arm26/atomic.h
--- 25/include/asm-arm26/atomic.h~arm26-update-the-atomic-ops	Thu Dec 23 14:22:01 2004
+++ 25-akpm/include/asm-arm26/atomic.h	Thu Dec 23 14:22:01 2004
@@ -2,12 +2,14 @@
  * linux/include/asm-arm26/atomic.h
  *
  * Copyright (c) 1996 Russell King.
+ * Modified for arm26 by Ian Molton
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
  * Changelog:
+ *   25-11-2004	IM	Updated for 2.6.9
 *   27-06-1996	RMK	Created
 *   13-04-1997	RMK	Made functions atomic!
 *   07-12-1997	RMK	Upgraded for v2.1.
@@ -31,102 +33,57 @@ typedef struct { volatile int counter; }
 #ifdef __KERNEL__
 #include <asm/system.h>

-#define atomic_read(v)	((v)->counter)
+#define atomic_read(v)	((v)->counter)
 #define atomic_set(v,i)	(((v)->counter) = (i))

-static inline void atomic_add(int i, volatile atomic_t *v)
+static inline int atomic_add_return(int i, atomic_t *v)
 {
-	unsigned long flags;
+	unsigned long flags;
+	int val;

-	local_irq_save(flags);
-	v->counter += i;
-	local_irq_restore(flags);
-}
-
-static inline void atomic_sub(int i, volatile atomic_t *v)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	v->counter -= i;
-	local_irq_restore(flags);
-}
-
-static inline void atomic_inc(volatile atomic_t *v)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	v->counter += 1;
-	local_irq_restore(flags);
-}
-
-static inline void atomic_dec(volatile atomic_t *v)
-{
-	unsigned long flags;
+	local_irq_save(flags);
+	val = v->counter;
+	v->counter = val += i;
+	local_irq_restore(flags);

-	local_irq_save(flags);
-	v->counter -= 1;
-	local_irq_restore(flags);
+	return val;
 }

-static inline int atomic_dec_and_test(volatile atomic_t *v)
+static inline int atomic_sub_return(int i, atomic_t *v)
 {
-	unsigned long flags;
-	int val;
+	unsigned long flags;
+	int val;

-	local_irq_save(flags);
-	val = v->counter;
-	v->counter = val -= 1;
-	local_irq_restore(flags);
+	local_irq_save(flags);
+	val = v->counter;
+	v->counter = val -= i;
+	local_irq_restore(flags);

-	return val == 0;
-}
-
-static inline int atomic_add_negative(int i, volatile atomic_t *v)
-{
-	unsigned long flags;
-	int val;
-
-	local_irq_save(flags);
-	val = v->counter;
-	v->counter = val += i;
-	local_irq_restore(flags);
-
-	return val < 0;
+	return val;
 }

 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
-	unsigned long flags;
+	unsigned long flags;

-	local_irq_save(flags);
-	*addr &= ~mask;
-	local_irq_restore(flags);
+	local_irq_save(flags);
+	*addr &= ~mask;
+	local_irq_restore(flags);
 }

-static inline int atomic_add_return(int i, volatile atomic_t *v)
-{
-	unsigned long flags;
-	int val;
+#define atomic_add(i, v)	(void) atomic_add_return(i, v)
+#define atomic_inc(v)		(void) atomic_add_return(1, v)
+#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
+#define atomic_dec(v)		(void) atomic_sub_return(1, v)

-	local_irq_save(flags);
-	val = v->counter + i;
-	v->counter = val;
-	local_irq_restore(flags);
-
-	return val;
-}
-
-static inline int atomic_sub_return(int i, volatile atomic_t *v)
-{
-	return atomic_add_return(-i, v);
-}
+#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)	(atomic_add_return(1, v))
+#define atomic_dec_return(v)	(atomic_sub_return(1, v))

-#define atomic_inc_return(v)	(atomic_add_return(1,v))
-#define atomic_dec_return(v)	(atomic_sub_return(1,v))
+#define atomic_add_negative(i,v)	(atomic_add_return(i, v) < 0)

-/* Atomic operations are already serializing on ARM */
+/* Atomic operations are already serializing on ARM26 */
 #define smp_mb__before_atomic_dec()	barrier()
 #define smp_mb__after_atomic_dec()	barrier()
 #define smp_mb__before_atomic_inc()	barrier()

diff -puN include/asm-arm26/bitops.h~arm26-update-the-atomic-ops include/asm-arm26/bitops.h
--- 25/include/asm-arm26/bitops.h~arm26-update-the-atomic-ops	Thu Dec 23 14:22:01 2004
+++ 25-akpm/include/asm-arm26/bitops.h	Thu Dec 23 14:22:01 2004
@@ -1,15 +1,15 @@
 /*
  * Copyright 1995, Russell King.
- * Various bits and pieces copyrights include:
- *  Linus Torvalds (test_bit).
- * Big endian support: Copyright 2001, Nicolas Pitre
- *  reworked by rmk.
  *
- * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ * Based on the arm32 version by RMK (and others). Their copyrights apply to
+ * those parts.
+ * Modified for arm26 by Ian Molton on 25/11/04
+ *
+ * bit 0 is the LSB of an "unsigned long" quantity.
 *
 * Please note that the code in this file should never be included
 * from user space.  Many of these are not implemented in assembler
- * since they would be too costly.  Also, they require priviledged
+ * since they would be too costly.  Also, they require privileged
 * instructions (which are not available from user mode) to ensure
 * that they are atomic.
 */
@@ -19,6 +19,7 @@

 #ifdef __KERNEL__

+#include <linux/compiler.h>
 #include <asm/system.h>

 #define smp_mb__before_clear_bit()	do { } while (0)
@@ -26,12 +27,10 @@

 /*
  * These functions are the basis of our bit ops.
- * First, the atomic bitops.
  *
- * The endian issue for these functions is handled by the macros below.
+ * First, the atomic bitops. These use native endian.
 */
-static inline void
-____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
+static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
 	unsigned long mask = 1UL << (bit & 31);
@@ -43,8 +42,7 @@ ____atomic_set_bit(unsigned int bit, vol
 	local_irq_restore(flags);
 }

-static inline void
-____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
+static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
 	unsigned long mask = 1UL << (bit & 31);
@@ -56,8 +54,7 @@ ____atomic_clear_bit(unsigned int bit, v
 	local_irq_restore(flags);
 }

-static inline void
-____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
+static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
 	unsigned long mask = 1UL << (bit & 31);
@@ -104,7 +101,7 @@ ____atomic_test_and_clear_bit(unsigned i
 }

 static inline int
-____atomic_test_and_change_bit_mask(unsigned int bit, volatile unsigned long *p)
+____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
 	unsigned int res;
@@ -158,7 +155,6 @@ static inline int __test_and_clear_bit(i

 	oldval = *p;
 	*p = oldval & ~mask;
-
 	return oldval & mask;
 }
@@ -170,30 +166,18 @@ static inline int __test_and_change_bit(

 	oldval = *p;
 	*p = oldval ^ mask;
-
 	return oldval & mask;
 }

 /*
  * This routine doesn't need to be atomic.
  */
-static inline int __test_bit(int nr, const unsigned long * p)
+static inline int __test_bit(int nr, const volatile unsigned long * p)
 {
-	return p[nr >> 5] & (1UL << (nr & 31));
+	return (p[nr >> 5] >> (nr & 31)) & 1UL;
 }

 /*
- * A note about Endian-ness.
- * -------------------------
- *
- *          ------------ physical data bus bits -----------
- *      D31 ... D24  D23 ... D16  D15 ... D8  D7 ... D0
- *          byte 3       byte 2       byte 1      byte 0
- *
- * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0.
- */
-
-/*
  * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
  */
 extern void _set_bit_le(int nr, volatile unsigned long * p);
@@ -204,6 +188,8 @@ extern int _test_and_clear_bit_le(int nr
 extern int _test_and_change_bit_le(int nr, volatile unsigned long * p);
 extern int _find_first_zero_bit_le(void * p, unsigned size);
 extern int _find_next_zero_bit_le(void * p, int size, int offset);
+extern int _find_first_bit_le(const unsigned long *p, unsigned size);
+extern int _find_next_bit_le(const unsigned long *p, int size, int offset);

 /*
  * The __* form of bitops are non-atomic and may be reordered.
@@ -213,11 +199,6 @@ extern int _find_next_zero_bit_le(void *
 		____atomic_##name(nr, p) :	\
 		_##name##_le(nr,p))

-#define	ATOMIC_BITOP_BE(name,nr,p)		\
-	(__builtin_constant_p(nr) ?		\
-	 ____atomic_##name(nr, p) :		\
-	 _##name##_be(nr,p))
-
 #define NONATOMIC_BITOP(name,nr,p)		\
 	(____nonatomic_##name(nr, p))
@@ -233,6 +214,8 @@ extern int _find_next_zero_bit_le(void *
 #define test_bit(nr,p)			__test_bit(nr,p)
 #define find_first_zero_bit(p,sz)	_find_first_zero_bit_le(p,sz)
 #define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_le(p,sz,off)
+#define find_first_bit(p,sz)		_find_first_bit_le(p,sz)
+#define find_next_bit(p,sz,off)		_find_next_bit_le(p,sz,off)

 #define WORD_BITOFF_TO_LE(x)		((x))
@@ -315,15 +298,15 @@ static inline int sched_find_first_bit(u
 * These do not need to be atomic.
 */
 #define ext2_set_bit(nr,p)			\
-		__test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)p)
-#define ext2_set_bit_atomic(lock,nr,p)		\
-		test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
+		__test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
+#define ext2_set_bit_atomic(lock,nr,p)		\
+		test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
 #define ext2_clear_bit(nr,p)			\
-		__test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)p)
+		__test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
 #define ext2_clear_bit_atomic(lock,nr,p)	\
-		test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
+		test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
 #define ext2_test_bit(nr,p)			\
-		__test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)p)
+		__test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
 #define ext2_find_first_zero_bit(p,sz)		\
 		_find_first_zero_bit_le(p,sz)
 #define ext2_find_next_zero_bit(p,sz,off)	\
@@ -334,13 +317,13 @@ static inline int sched_find_first_bit(u
 * These do not need to be atomic.
 */
 #define minix_set_bit(nr,p)			\
-		__set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)p)
+		__set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
 #define minix_test_bit(nr,p)			\
-		__test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)p)
+		__test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
 #define minix_test_and_set_bit(nr,p)		\
-		__test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)p)
+		__test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
 #define minix_test_and_clear_bit(nr,p)		\
-		__test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)p)
+		__test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
 #define minix_find_first_zero_bit(p,sz)	\
 		_find_first_zero_bit_le(p,sz)
_
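
For readers unfamiliar with the pattern the atomic.h half of the patch adopts,
here is a minimal user-space sketch: implement only the *_return operations
under an interrupt-disable, then derive everything else as macros.  The
local_irq_save()/local_irq_restore() stubs and the main() driver below are
illustrative stand-ins, not kernel code; on a uniprocessor like arm26 the real
primitives mask interrupts, which is all the atomicity required.

#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;

/* Hypothetical stand-ins: the real primitives disable interrupts around
 * the read-modify-write so nothing can observe a half-done update. */
#define local_irq_save(flags)		((void)(flags))
#define local_irq_restore(flags)	((void)(flags))

/* Only the *_return ops touch the counter directly. */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags = 0;
	int val;

	local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	local_irq_restore(flags);

	return val;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i, v);	/* sketch only; the patch open-codes it */
}

/* Everything else is derived, as the arm26 header now does. */
#define atomic_add(i, v)	((void)atomic_add_return(i, v))
#define atomic_inc(v)		((void)atomic_add_return(1, v))
#define atomic_sub(i, v)	((void)atomic_sub_return(i, v))
#define atomic_dec(v)		((void)atomic_sub_return(1, v))
#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

int main(void)
{
	atomic_t v = { 0 };
	int neg;

	atomic_inc(&v);		/* counter: 1 */
	atomic_add(5, &v);	/* counter: 6 */
	printf("dec_and_test at 6 -> %d\n", atomic_dec_and_test(&v));	/* 0 */
	atomic_sub(5, &v);	/* counter: 0 */
	neg = atomic_add_negative(-1, &v);	/* counter 0 -> -1, returns 1 */
	printf("add_negative(-1) -> %d, counter now %d\n", neg, v.counter);
	return 0;
}

Deriving the whole family from two primitives leaves a single critical
section to get right, which is what shrinks the header in the diffstat above.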
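
The bitops half standardises on the convention stated in the new header
comment: bit 0 is the LSB of an "unsigned long", so a bit number selects word
nr >> 5 and bit nr & 31.  A small self-contained check of that indexing
follows; the map[] array and asserts are illustrative, and the shift
constants assume arm26's 32-bit unsigned long.

#include <assert.h>

/* Non-atomic __test_bit as in the patch: word index nr >> 5,
 * bit index nr & 31 (32-bit unsigned long on arm26). */
static inline int __test_bit(int nr, const volatile unsigned long *p)
{
	return (p[nr >> 5] >> (nr & 31)) & 1UL;
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };

	map[1] |= 1UL << 1;		/* bit 33 under the nr >> 5 convention */
	assert(__test_bit(33, map));	/* word 1, bit 1 is set */
	assert(!__test_bit(1, map));	/* word 0, bit 1 is clear */
	return 0;
}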