author      Paul Gortmaker <paul.gortmaker@windriver.com>  2017-10-03 01:03:37 -0400
committer   Paul Gortmaker <paul.gortmaker@windriver.com>  2017-10-03 01:03:37 -0400
commit      3f75297e803b70940ef62b497482b27aaa452b4f (patch)
tree        56c20b0333162d020d07a1ce48168d80f089ef1f
parent      bde0334292a7148a2dd562e96f2ec77649102431 (diff)
download    4.12-rt-patches-3f75297e803b70940ef62b497482b27aaa452b4f.tar.gz
refresh powerpc preempt lazy patch
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
-rw-r--r--  patches/powerpc-preempt-lazy-support.patch  52
1 file changed, 30 insertions(+), 22 deletions(-)
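The refreshed patch below wires the TIF_NEED_RESCHED_LAZY thread flag into the powerpc exception-return paths so the resume code tests a combined mask instead of TIF_NEED_RESCHED alone. As a rough C sketch of that check (flag values and the _TIF_NEED_RESCHED_MASK definition are taken from the thread_info.h hunks below; the helper function and the standalone program around it are illustrative only, not kernel code):

```c
/* Illustrative sketch of the combined need-resched test introduced by the
 * lazy-preempt patch.  Flag bit positions and the mask mirror the
 * thread_info.h hunks; need_resched_any() is a hypothetical helper, not a
 * kernel function.
 */
#include <stdio.h>

#define TIF_NEED_RESCHED        2   /* rescheduling necessary */
#define TIF_NEED_RESCHED_LAZY   3   /* lazy rescheduling necessary */

#define _TIF_NEED_RESCHED       (1UL << TIF_NEED_RESCHED)
#define _TIF_NEED_RESCHED_LAZY  (1UL << TIF_NEED_RESCHED_LAZY)
#define _TIF_NEED_RESCHED_MASK  (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)

/* Mirrors the flag test the patched exception-return code performs before
 * deciding whether to reschedule: either bit in the mask triggers it. */
static int need_resched_any(unsigned long ti_flags)
{
    return (ti_flags & _TIF_NEED_RESCHED_MASK) != 0;
}

int main(void)
{
    unsigned long flags = _TIF_NEED_RESCHED_LAZY;   /* only the lazy bit set */
    printf("resched needed: %d\n", need_resched_any(flags));   /* prints 1 */
    return 0;
}
```

The mask reflects how the entry_32.S and entry_64.S hunks extend their need-resched tests to also cover the lazy flag, rather than reacting to TIF_NEED_RESCHED alone.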
diff --git a/patches/powerpc-preempt-lazy-support.patch b/patches/powerpc-preempt-lazy-support.patch
index facf5b1019737c..b60e957316d92b 100644
--- a/patches/powerpc-preempt-lazy-support.patch
+++ b/patches/powerpc-preempt-lazy-support.patch
@@ -1,21 +1,17 @@
+From a608ba4dbdfc249f58585852efe1569ab960b63f Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 1 Nov 2012 10:14:11 +0100
-Subject: powerpc: Add support for lazy preemption
+Subject: [PATCH] powerpc: Add support for lazy preemption
Implement the powerpc pieces for lazy preempt.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- arch/powerpc/Kconfig | 1 +
- arch/powerpc/include/asm/thread_info.h | 11 ++++++++---
- arch/powerpc/kernel/asm-offsets.c | 1 +
- arch/powerpc/kernel/entry_32.S | 17 ++++++++++++-----
- arch/powerpc/kernel/entry_64.S | 14 +++++++++++---
- 5 files changed, 33 insertions(+), 11 deletions(-)
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index fff65b147584..52edc92421b7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -155,6 +155,7 @@ config PPC
+@@ -154,6 +154,7 @@ config PPC
select HAVE_PERF_EVENTS_NMI if PPC64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -23,6 +19,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index 6fc6464f7421..2245bfc02bd4 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -43,6 +43,8 @@ struct thread_info {
@@ -34,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
unsigned long local_flags; /* private flags for thread */
#ifdef CONFIG_LIVEPATCH
unsigned long *livepatch_sp;
-@@ -88,8 +90,7 @@ static inline struct thread_info *curren
+@@ -88,8 +90,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
@@ -43,8 +41,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */
#define TIF_32BIT 4 /* 32 bit binary */
#define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
- #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
-@@ -107,6 +108,8 @@ static inline struct thread_info *curren
+ #define TIF_PATCH_PENDING 6 /* pending live patching update */
+@@ -108,6 +109,8 @@ static inline struct thread_info *current_thread_info(void)
#if defined(CONFIG_PPC64)
#define TIF_ELF2ABI 18 /* function descriptors must die! */
#endif
@@ -53,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-@@ -125,14 +128,16 @@ static inline struct thread_info *curren
+@@ -127,14 +130,17 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
#define _TIF_NOHZ (1<<TIF_NOHZ)
@@ -64,13 +62,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE | \
-- _TIF_RESTORE_TM)
-+ _TIF_RESTORE_TM | _TIF_NEED_RESCHED_LAZY)
+- _TIF_RESTORE_TM | _TIF_PATCH_PENDING)
++ _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
++ _TIF_NEED_RESCHED_LAZY)
#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
/* Bits in local_flags */
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index 4367e7df51a1..016d03766921 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -156,6 +156,7 @@ int main(void)
@@ -81,9 +82,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
OFFSET(TI_TASK, thread_info, task);
OFFSET(TI_CPU, thread_info, cpu);
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index a38600949f3a..c7bf018a0a83 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
-@@ -845,7 +845,14 @@ user_exc_return: /* r10 contains MSR_KE
+@@ -845,7 +845,14 @@ resume_kernel:
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
andi. r8,r8,_TIF_NEED_RESCHED
@@ -98,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
-@@ -856,11 +863,11 @@ user_exc_return: /* r10 contains MSR_KE
+@@ -856,11 +863,11 @@ resume_kernel:
*/
bl trace_hardirqs_off
#endif
@@ -113,7 +116,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_TRACE_IRQFLAGS
/* And now, to properly rebalance the above, we tell lockdep they
* are being turned back on, which will happen when we return
-@@ -1183,7 +1190,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
+@@ -1183,7 +1190,7 @@ global_dbcr0:
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
do_work: /* r10 contains MSR_KERNEL here */
@@ -122,7 +125,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
-@@ -1204,7 +1211,7 @@ do_resched: /* r10 contains MSR_KERNEL
+@@ -1204,7 +1211,7 @@ recheck:
MTMSRD(r10) /* disable interrupts */
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
@@ -131,9 +134,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bne- do_resched
andi. r0,r9,_TIF_USER_WORK_MASK
beq restore_user
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index 767ef6d68c9e..2cb4d5552319 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
-@@ -656,7 +656,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
+@@ -656,7 +656,7 @@ _GLOBAL(ret_from_except_lite)
bl restore_math
b restore
#endif
@@ -142,7 +147,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq 2f
bl restore_interrupts
SCHEDULE_USER
-@@ -718,10 +718,18 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
+@@ -718,10 +718,18 @@ resume_kernel:
#ifdef CONFIG_PREEMPT
/* Check if we need to preempt */
@@ -162,7 +167,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cmpwi cr1,r8,0
ld r0,SOFTE(r1)
cmpdi r0,0
-@@ -738,7 +746,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
+@@ -738,7 +746,7 @@ resume_kernel:
/* Re-test flags and eventually loop */
CURRENT_THREAD_INFO(r9, r1)
ld r4,TI_FLAGS(r9)
@@ -171,3 +176,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bne 1b
/*
+--
+2.1.4
+