Patch from Anton Blanchard ppc64/kernel/htab.c | 4 ++++ ppc64/kernel/open_pic.c | 30 +++++++++++++----------------- ppc64/kernel/pSeries_lpar.c | 7 ++++++- ppc64/kernel/signal32.c | 2 ++ ppc64/kernel/xics.c | 8 ++++++-- ppc64/mm/init.c | 1 + asm-ppc64/hardirq.h | 2 +- asm-ppc64/semaphore.h | 14 +++++++------- 8 files changed, 40 insertions(+), 28 deletions(-) diff -puN arch/ppc64/kernel/htab.c~anton-1 arch/ppc64/kernel/htab.c --- 25/arch/ppc64/kernel/htab.c~anton-1 2003-02-19 11:56:55.000000000 -0800 +++ 25-akpm/arch/ppc64/kernel/htab.c 2003-02-19 11:56:55.000000000 -0800 @@ -46,6 +46,7 @@ #include #include #include +#include /* * Note: pte --> Linux PTE @@ -348,6 +349,9 @@ repeat: } } + if (unlikely(slot == -2)) + panic("hash_page: pte_insert failed\n"); + pte_val(new_pte) |= (slot<<12) & _PAGE_GROUP_IX; /* diff -puN arch/ppc64/kernel/open_pic.c~anton-1 arch/ppc64/kernel/open_pic.c --- 25/arch/ppc64/kernel/open_pic.c~anton-1 2003-02-19 11:56:55.000000000 -0800 +++ 25-akpm/arch/ppc64/kernel/open_pic.c 2003-02-19 11:56:55.000000000 -0800 @@ -555,14 +555,15 @@ void openpic_request_IPIs(void) if (OpenPIC == NULL) return; - request_irq(openpic_vec_ipi, - openpic_ipi_action, 0, "IPI0 (call function)", 0); - request_irq(openpic_vec_ipi+1, - openpic_ipi_action, 0, "IPI1 (reschedule)", 0); - request_irq(openpic_vec_ipi+2, - openpic_ipi_action, 0, "IPI2 (invalidate tlb)", 0); - request_irq(openpic_vec_ipi+3, - openpic_ipi_action, 0, "IPI3 (xmon break)", 0); + /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */ + request_irq(openpic_vec_ipi, openpic_ipi_action, SA_INTERRUPT, + "IPI0 (call function)", 0); + request_irq(openpic_vec_ipi+1, openpic_ipi_action, SA_INTERRUPT, + "IPI1 (reschedule)", 0); + request_irq(openpic_vec_ipi+2, openpic_ipi_action, SA_INTERRUPT, + "IPI2 (invalidate tlb)", 0); + request_irq(openpic_vec_ipi+3, openpic_ipi_action, SA_INTERRUPT, + "IPI3 (xmon break)", 0); for ( i = 0; i < OPENPIC_NUM_IPI ; i++ ) 
openpic_enable_ipi(openpic_vec_ipi+i); @@ -754,17 +755,12 @@ static void openpic_set_affinity(unsigne #ifdef CONFIG_SMP static void openpic_end_ipi(unsigned int irq_nr) { - /* IPIs are marked IRQ_PER_CPU. This has the side effect of + /* + * IPIs are marked IRQ_PER_CPU. This has the side effect of * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from * applying to them. We EOI them late to avoid re-entering. - * however, I'm wondering if we could simply let them have the - * SA_INTERRUPT flag and let them execute with all interrupts OFF. - * This would have the side effect of either running cross-CPU - * functions with interrupts off, or we can re-enable them explicitely - * with a local_irq_enable() in smp_call_function_interrupt(), since - * smp_call_function() is protected by a spinlock. - * Or maybe we shouldn't set the IRQ_PER_CPU flag on cross-CPU - * function calls IPI at all but that would make a special case. + * We mark IPI's with SA_INTERRUPT as they must run with + * irqs disabled. */ openpic_eoi(); } diff -puN arch/ppc64/kernel/pSeries_lpar.c~anton-1 arch/ppc64/kernel/pSeries_lpar.c --- 25/arch/ppc64/kernel/pSeries_lpar.c~anton-1 2003-02-19 11:56:55.000000000 -0800 +++ 25-akpm/arch/ppc64/kernel/pSeries_lpar.c 2003-02-19 11:56:55.000000000 -0800 @@ -460,8 +460,13 @@ long pSeries_lpar_hpte_insert(unsigned l if (lpar_rc == H_PTEG_Full) return -1; + /* + * Since we try and ioremap PHBs we dont own, the pte insert + * will fail. However we must catch the failure in hash_page + * or we will loop forever, so return -2 in this case. 
+ */ if (lpar_rc != H_Success) - panic("Bad return code from pte enter rc = %lx\n", lpar_rc); + return -2; return slot; } diff -puN arch/ppc64/kernel/signal32.c~anton-1 arch/ppc64/kernel/signal32.c --- 25/arch/ppc64/kernel/signal32.c~anton-1 2003-02-19 11:56:55.000000000 -0800 +++ 25-akpm/arch/ppc64/kernel/signal32.c 2003-02-19 11:56:55.000000000 -0800 @@ -29,6 +29,8 @@ #include #include +#define DEBUG_SIG 0 + #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) /* * These are the flags in the MSR that the user is allowed to change diff -puN arch/ppc64/kernel/xics.c~anton-1 arch/ppc64/kernel/xics.c --- 25/arch/ppc64/kernel/xics.c~anton-1 2003-02-19 11:56:55.000000000 -0800 +++ 25-akpm/arch/ppc64/kernel/xics.c 2003-02-19 11:56:55.000000000 -0800 @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -423,8 +424,11 @@ nextnode: } #ifdef CONFIG_SMP - real_irq_to_virt_map[XICS_IPI] = virt_irq_to_real_map[XICS_IPI] = XICS_IPI; - request_irq(XICS_IPI + XICS_IRQ_OFFSET, xics_ipi_action, 0, "IPI", 0); + real_irq_to_virt_map[XICS_IPI] = virt_irq_to_real_map[XICS_IPI] = + XICS_IPI; + /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */ + request_irq(XICS_IPI + XICS_IRQ_OFFSET, xics_ipi_action, SA_INTERRUPT, + "IPI", 0); irq_desc[XICS_IPI+XICS_IRQ_OFFSET].status |= IRQ_PER_CPU; #endif ppc64_boot_msg(0x21, "XICS Done"); diff -puN arch/ppc64/mm/init.c~anton-1 arch/ppc64/mm/init.c --- 25/arch/ppc64/mm/init.c~anton-1 2003-02-19 11:56:55.000000000 -0800 +++ 25-akpm/arch/ppc64/mm/init.c 2003-02-19 11:56:55.000000000 -0800 @@ -233,6 +233,7 @@ static void map_io_page(unsigned long ea hpteg = ((hash & htab_data.htab_hash_mask)*HPTES_PER_GROUP); + /* Panic if a pte group is full */ if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT, 0, _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX, 1, 0) == -1) { diff -puN include/asm-ppc64/hardirq.h~anton-1 include/asm-ppc64/hardirq.h --- 25/include/asm-ppc64/hardirq.h~anton-1 2003-02-19 11:56:55.000000000 
-0800 +++ 25-akpm/include/asm-ppc64/hardirq.h 2003-02-19 11:56:55.000000000 -0800 @@ -82,7 +82,7 @@ typedef struct { #define irq_enter() (preempt_count() += HARDIRQ_OFFSET) -#if CONFIG_PREEMPT +#ifdef CONFIG_PREEMPT # define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked()) # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1) #else diff -puN include/asm-ppc64/semaphore.h~anton-1 include/asm-ppc64/semaphore.h --- 25/include/asm-ppc64/semaphore.h~anton-1 2003-02-19 11:56:55.000000000 -0800 +++ 25-akpm/include/asm-ppc64/semaphore.h 2003-02-19 11:56:55.000000000 -0800 @@ -23,12 +23,12 @@ struct semaphore { */ atomic_t count; wait_queue_head_t wait; -#if WAITQUEUE_DEBUG +#ifdef WAITQUEUE_DEBUG long __magic; #endif }; -#if WAITQUEUE_DEBUG +#ifdef WAITQUEUE_DEBUG # define __SEM_DEBUG_INIT(name) \ , (long)&(name).__magic #else @@ -53,7 +53,7 @@ static inline void sema_init (struct sem { atomic_set(&sem->count, val); init_waitqueue_head(&sem->wait); -#if WAITQUEUE_DEBUG +#ifdef WAITQUEUE_DEBUG sem->__magic = (long)&sem->__magic; #endif } @@ -74,7 +74,7 @@ extern void __up(struct semaphore * sem) static inline void down(struct semaphore * sem) { -#if WAITQUEUE_DEBUG +#ifdef WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif might_sleep(); @@ -91,7 +91,7 @@ static inline int down_interruptible(str { int ret = 0; -#if WAITQUEUE_DEBUG +#ifdef WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif might_sleep(); @@ -106,7 +106,7 @@ static inline int down_trylock(struct se { int ret; -#if WAITQUEUE_DEBUG +#ifdef WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif @@ -117,7 +117,7 @@ static inline int down_trylock(struct se static inline void up(struct semaphore * sem) { -#if WAITQUEUE_DEBUG +#ifdef WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif _