From: "Mikael Starvik" Patches to support SMP. * Each CPU has its own current_pgd. * flush_tlb_range is implemented as flush_tlb_mm. * Atomic operations implemented with spinlocks. * Semaphores implemented with spinlocks. Signed-off-by: Mikael Starvik Signed-off-by: Andrew Morton --- arch/cris/arch-v10/mm/fault.c | 26 +------------- arch/cris/arch-v10/mm/init.c | 2 - arch/cris/arch-v10/mm/tlb.c | 49 --------------------------- include/asm-cris/arch-v10/atomic.h | 7 +++ include/asm-cris/atomic.h | 66 +++++++++++++------------------------ include/asm-cris/mmu_context.h | 2 - include/asm-cris/semaphore.h | 21 +++++------ include/asm-cris/smp.h | 7 +++ include/asm-cris/spinlock.h | 1 include/asm-cris/tlbflush.h | 19 ++++++++-- 10 files changed, 69 insertions(+), 131 deletions(-) diff -puN arch/cris/arch-v10/mm/fault.c~cris-update-13-17-smp arch/cris/arch-v10/mm/fault.c --- 25/arch/cris/arch-v10/mm/fault.c~cris-update-13-17-smp 2005-06-25 14:20:05.000000000 -0700 +++ 25-akpm/arch/cris/arch-v10/mm/fault.c 2005-06-25 14:20:05.000000000 -0700 @@ -14,6 +14,7 @@ #include #include #include +#include /* debug of low-level TLB reload */ #undef DEBUG @@ -24,8 +25,6 @@ #define D(x) #endif -extern volatile pgd_t *current_pgd; - extern const struct exception_table_entry *search_exception_tables(unsigned long addr); @@ -46,7 +45,7 @@ handle_mmu_bus_fault(struct pt_regs *reg int page_id; int acc, inv; #endif - pgd_t* pgd = (pgd_t*)current_pgd; + pgd_t* pgd = (pgd_t*)per_cpu(current_pgd, smp_processor_id()); pmd_t *pmd; pte_t pte; int miss, we, writeac; @@ -94,24 +93,3 @@ handle_mmu_bus_fault(struct pt_regs *reg *R_TLB_LO = pte_val(pte); local_irq_restore(flags); } - -/* Called from arch/cris/mm/fault.c to find fixup code. */ -int -find_fixup_code(struct pt_regs *regs) -{ - const struct exception_table_entry *fixup; - - if ((fixup = search_exception_tables(regs->irp)) != 0) { - /* Adjust the instruction pointer in the stackframe. */ - regs->irp = fixup->fixup; - - /* - * Don't return by restoring the CPU state, so switch - * frame-type. 
-		 */
-		regs->frametype = CRIS_FRAME_NORMAL;
-		return 1;
-	}
-
-	return 0;
-}
diff -puN arch/cris/arch-v10/mm/init.c~cris-update-13-17-smp arch/cris/arch-v10/mm/init.c
--- 25/arch/cris/arch-v10/mm/init.c~cris-update-13-17-smp 2005-06-25 14:20:05.000000000 -0700
+++ 25-akpm/arch/cris/arch-v10/mm/init.c 2005-06-25 14:20:05.000000000 -0700
@@ -42,7 +42,7 @@ paging_init(void)
 	 *  switch_mm)
 	 */
 
-	current_pgd = init_mm.pgd;
+	per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;
 
 	/* initialise the TLB (tlb.c) */
 
diff -puN arch/cris/arch-v10/mm/tlb.c~cris-update-13-17-smp arch/cris/arch-v10/mm/tlb.c
--- 25/arch/cris/arch-v10/mm/tlb.c~cris-update-13-17-smp 2005-06-25 14:20:05.000000000 -0700
+++ 25-akpm/arch/cris/arch-v10/mm/tlb.c 2005-06-25 14:20:05.000000000 -0700
@@ -139,53 +139,6 @@ flush_tlb_page(struct vm_area_struct *vm
 	local_irq_restore(flags);
 }
 
-/* invalidate a page range */
-
-void
-flush_tlb_range(struct vm_area_struct *vma,
-		unsigned long start,
-		unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	int page_id = mm->context.page_id;
-	int i;
-	unsigned long flags;
-
-	D(printk("tlb: flush range %p<->%p in context %d (%p)\n",
-		 start, end, page_id, mm));
-
-	if(page_id == NO_CONTEXT)
-		return;
-
-	start &= PAGE_MASK; /* probably not necessary */
-	end &= PAGE_MASK; /* dito */
-
-	/* invalidate those TLB entries that match both the mm context
-	 * and the virtual address range
-	 */
-
-	local_save_flags(flags);
-	local_irq_disable();
-	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
-		unsigned long tlb_hi, vpn;
-		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
-		tlb_hi = *R_TLB_HI;
-		vpn = tlb_hi & PAGE_MASK;
-		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
-		    vpn >= start && vpn < end) {
-			*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
-				      IO_FIELD(R_TLB_HI, vpn, i & 0xf ) );
-
-			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) |
-				      IO_STATE(R_TLB_LO, valid, no ) |
-				      IO_STATE(R_TLB_LO, kernel,no ) |
-				      IO_STATE(R_TLB_LO, we, no ) |
-				      IO_FIELD(R_TLB_LO, pfn, 0 ) );
-		}
-	}
-	local_irq_restore(flags);
-}
-
 /* dump the entire TLB for debug purposes */
 
 #if 0
diff -puN /dev/null include/asm-cris/arch-v10/atomic.h
--- /dev/null 2003-09-15 06:40:47.000000000 -0700
+++ 25-akpm/include/asm-cris/arch-v10/atomic.h 2005-06-25 14:20:05.000000000 -0700
@@ -0,0 +1,7 @@
+#ifndef __ASM_CRIS_ARCH_ATOMIC__
+#define __ASM_CRIS_ARCH_ATOMIC__
+
+#define cris_atomic_save(addr, flags) local_irq_save(flags);
+#define cris_atomic_restore(addr, flags) local_irq_restore(flags);
+
+#endif
diff -puN include/asm-cris/atomic.h~cris-update-13-17-smp include/asm-cris/atomic.h
--- 25/include/asm-cris/atomic.h~cris-update-13-17-smp 2005-06-25 14:20:05.000000000 -0700
+++ 25-akpm/include/asm-cris/atomic.h 2005-06-25 14:20:05.000000000 -0700
@@ -4,21 +4,14 @@
 #define __ASM_CRIS_ATOMIC__
 
 #include
+#include
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
  * resource counting etc..
  */
 
-/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-
-#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)
-
-typedef struct { int counter; } atomic_t;
+typedef struct { volatile int counter; } atomic_t;
 
 #define ATOMIC_INIT(i)  { (i) }
 
@@ -30,29 +23,26 @@ typedef struct { int counter; } atomic_t
 extern __inline__ void atomic_add(int i, volatile atomic_t *v)
 {
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	v->counter += i;
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 }
 
 extern __inline__ void atomic_sub(int i, volatile atomic_t *v)
 {
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	v->counter -= i;
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 }
 
 extern __inline__ int atomic_add_return(int i, volatile atomic_t *v)
 {
 	unsigned long flags;
 	int retval;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	retval = (v->counter += i);
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 	return retval;
 }
 
@@ -62,10 +52,9 @@ extern __inline__ int atomic_sub_return(
 {
 	unsigned long flags;
 	int retval;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	retval = (v->counter -= i);
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 	return retval;
 }
 
@@ -73,39 +62,35 @@ extern __inline__ int atomic_sub_and_tes
 {
 	int retval;
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	retval = (v->counter -= i) == 0;
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 	return retval;
 }
 
 extern __inline__ void atomic_inc(volatile atomic_t *v)
 {
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	(v->counter)++;
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 }
 
 extern __inline__ void atomic_dec(volatile atomic_t *v)
 {
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	(v->counter)--;
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 }
 
 extern __inline__ int atomic_inc_return(volatile atomic_t *v)
 {
 	unsigned long flags;
 	int retval;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	retval = (v->counter)++;
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 	return retval;
 }
 
@@ -113,20 +98,18 @@ extern __inline__ int atomic_dec_return(
 {
 	unsigned long flags;
 	int retval;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	retval = (v->counter)--;
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 	return retval;
 }
 
 extern __inline__ int atomic_dec_and_test(volatile atomic_t *v)
 {
 	int retval;
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	retval = --(v->counter) == 0;
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 	return retval;
 }
 
@@ -134,10 +117,9 @@ extern __inline__ int atomic_inc_and_tes
 {
 	int retval;
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	retval = ++(v->counter) == 0;
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 	return retval;
 }
 
diff -puN include/asm-cris/mmu_context.h~cris-update-13-17-smp include/asm-cris/mmu_context.h
--- 25/include/asm-cris/mmu_context.h~cris-update-13-17-smp 2005-06-25 14:20:05.000000000 -0700
+++ 25-akpm/include/asm-cris/mmu_context.h 2005-06-25 14:20:05.000000000 -0700
@@ -15,7 +15,7 @@ extern void switch_mm(struct mm_struct *
  * registers like cr3 on the i386 */
 
-extern volatile pgd_t *current_pgd; /* defined in arch/cris/mm/fault.c */
+extern volatile DEFINE_PER_CPU(pgd_t *,current_pgd); /* defined in arch/cris/mm/fault.c */
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
diff -puN include/asm-cris/semaphore.h~cris-update-13-17-smp include/asm-cris/semaphore.h
--- 25/include/asm-cris/semaphore.h~cris-update-13-17-smp 2005-06-25 14:20:05.000000000 -0700
+++ 25-akpm/include/asm-cris/semaphore.h 2005-06-25 14:20:05.000000000 -0700
@@ -72,10 +72,9 @@ extern inline void down(struct semaphore
 	might_sleep();
 
 	/* atomically decrement the semaphores count, and if its negative, we wait */
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(sem, flags);
 	failed = --(sem->count.counter) < 0;
-	local_irq_restore(flags);
+	cris_atomic_restore(sem, flags);
 	if(failed) {
 		__down(sem);
 	}
@@ -95,10 +94,9 @@ extern inline int down_interruptible(str
 	might_sleep();
 
 	/* atomically decrement the semaphores count, and if its negative, we wait */
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(sem, flags);
 	failed = --(sem->count.counter) < 0;
-	local_irq_restore(flags);
+	cris_atomic_restore(sem, flags);
 	if(failed)
 		failed = __down_interruptible(sem);
 	return(failed);
@@ -109,13 +107,13 @@ extern inline int down_trylock(struct se
 	unsigned long flags;
 	int failed;
 
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(sem, flags);
 	failed = --(sem->count.counter) < 0;
-	local_irq_restore(flags);
+	cris_atomic_restore(sem, flags);
 	if(failed)
 		failed = __down_trylock(sem);
 	return(failed);
+
 }
 
 /*
@@ -130,10 +128,9 @@ extern inline void up(struct semaphore *
 	int wakeup;
 
 	/* atomically increment the semaphores count, and if it was negative, we wake people */
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(sem, flags);
 	wakeup = ++(sem->count.counter) <= 0;
-	local_irq_restore(flags);
+	cris_atomic_restore(sem, flags);
 	if(wakeup) {
 		__up(sem);
 	}
diff -puN include/asm-cris/smp.h~cris-update-13-17-smp include/asm-cris/smp.h
--- 25/include/asm-cris/smp.h~cris-update-13-17-smp 2005-06-25 14:20:05.000000000 -0700
+++ 25-akpm/include/asm-cris/smp.h 2005-06-25 14:20:05.000000000 -0700
@@ -1,4 +1,11 @@
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
 
+#include
+
+extern cpumask_t phys_cpu_present_map;
+#define cpu_possible_map phys_cpu_present_map
+
+#define __smp_processor_id() (current_thread_info()->cpu)
+
 #endif
diff -puN /dev/null include/asm-cris/spinlock.h
--- /dev/null 2003-09-15 06:40:47.000000000 -0700
+++ 25-akpm/include/asm-cris/spinlock.h 2005-06-25 14:20:05.000000000 -0700
@@ -0,0 +1 @@
+#include
diff -puN include/asm-cris/tlbflush.h~cris-update-13-17-smp include/asm-cris/tlbflush.h
--- 25/include/asm-cris/tlbflush.h~cris-update-13-17-smp 2005-06-25 14:20:05.000000000 -0700
+++ 25-akpm/include/asm-cris/tlbflush.h 2005-06-25 14:20:05.000000000 -0700
@@ -18,13 +18,26 @@
  *
  */
 
+extern void __flush_tlb_all(void);
+extern void __flush_tlb_mm(struct mm_struct *mm);
+extern void __flush_tlb_page(struct vm_area_struct *vma,
+			     unsigned long addr);
+
+#ifdef CONFIG_SMP
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
 extern void flush_tlb_page(struct vm_area_struct *vma,
 			   unsigned long addr);
-extern void flush_tlb_range(struct vm_area_struct *vma,
-			    unsigned long start,
-			    unsigned long end);
+#else
+#define flush_tlb_all __flush_tlb_all
+#define flush_tlb_mm __flush_tlb_mm
+#define flush_tlb_page __flush_tlb_page
+#endif
+
+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
+{
+	flush_tlb_mm(vma->vm_mm);
+}
 
 extern inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
_
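
A note on the cris_atomic_save()/cris_atomic_restore() pair that the atomic
and semaphore changes above funnel through: the arch-v10 header added in this
diff is the uniprocessor flavour, where the hooks simply fall back to
local_irq_save()/local_irq_restore().  The "atomic operations implemented with
spinlocks" item in the changelog refers to an SMP flavour of the same two
hooks, which is not part of this diff.  The following is only a minimal sketch
of what such an arch header could look like; the lock-array name, its size and
the hash macro are illustrative assumptions, not code from this patch:

	/* Hypothetical sketch only, not from this patch.  An SMP variant of
	 * cris_atomic_save()/cris_atomic_restore(): the address of the
	 * atomic_t (or semaphore counter) is hashed onto a small array of
	 * spinlocks, so unrelated counters rarely contend on the same lock.
	 * Interrupts are disabled while the lock is held, so the primitives
	 * remain usable from interrupt context, matching the UP semantics.
	 */
	#include <linux/spinlock.h>

	#define ATOMIC_HASH_SIZE 4	/* illustrative; must be a power of two */
	#define ATOMIC_HASH(addr) \
		(&cris_atomic_locks[(((unsigned long)(addr)) >> 2) & (ATOMIC_HASH_SIZE - 1)])

	extern spinlock_t cris_atomic_locks[ATOMIC_HASH_SIZE]; /* defined once in arch code */

	#define cris_atomic_save(addr, flags) \
		spin_lock_irqsave(ATOMIC_HASH(addr), flags)
	#define cris_atomic_restore(addr, flags) \
		spin_unlock_irqrestore(ATOMIC_HASH(addr), flags)

Because every atomic_*() operation and semaphore fast path in the generic
headers already goes through these two macros, the UP and SMP cases differ
only in the per-arch header; include/asm-cris/atomic.h and semaphore.h stay
identical for both.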