author     Paul Gortmaker <paul.gortmaker@windriver.com>  2017-10-03 00:54:41 -0400
committer  Paul Gortmaker <paul.gortmaker@windriver.com>  2017-10-03 00:54:41 -0400
commit     bde0334292a7148a2dd562e96f2ec77649102431 (patch)
tree       efa7b9889ba7ebcedb25af6f9e805ad65802bc6a
parent     523bfd3c0da094129ca54905f7b328c703fdfaa9 (diff)
download   4.12-rt-patches-bde0334292a7148a2dd562e96f2ec77649102431.tar.gz
refresh x86-preempt-lazy.patch
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
-rw-r--r--  patches/x86-preempt-lazy.patch  36
1 file changed, 18 insertions, 18 deletions
diff --git a/patches/x86-preempt-lazy.patch b/patches/x86-preempt-lazy.patch
index 51d7e634aa69e6..3905a80a883863 100644
--- a/patches/x86-preempt-lazy.patch
+++ b/patches/x86-preempt-lazy.patch
@@ -1,4 +1,4 @@
-From 27e7c8249e0f683c53400c81f588f3c1690ecf29 Mon Sep 17 00:00:00 2001
+From 77d7d0115176a7dab41f24c2f5d82cdbee99bc0d Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 1 Nov 2012 11:03:47 +0100
Subject: [PATCH] x86: Support for lazy preemption
@@ -8,27 +8,27 @@ Implement the x86 pieces for lazy preempt.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 5e6a68ed5d64..ef502de03137 100644
+index f3aec424009b..7af373e8ef6f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -159,6 +159,7 @@ config X86
- select HAVE_PERF_REGS
+@@ -161,6 +161,7 @@ config X86
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RELIABLE_STACKTRACE if X86_64 && FRAME_POINTER && STACK_VALIDATION
+ select HAVE_PREEMPT_LAZY
select HAVE_STACK_VALIDATION if X86_64
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UNSTABLE_SCHED_CLOCK
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
-index d602b269058f..9bb6144b0e25 100644
+index 964d9986e20e..263f48d215b4 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
-@@ -130,7 +130,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
+@@ -131,7 +131,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
#define EXIT_TO_USERMODE_LOOP_FLAGS \
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
-- _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)
-+ _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY)
+- _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
++ _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
@@ -42,10 +42,10 @@ index d602b269058f..9bb6144b0e25 100644
#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
-index 57f7ec35216e..1e4d382963f4 100644
+index 50bc26949e9e..1a0914133635 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
-@@ -332,8 +332,25 @@ END(ret_from_exception)
+@@ -329,8 +329,25 @@ END(ret_from_exception)
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
.Lneed_resched:
@@ -72,10 +72,10 @@ index 57f7ec35216e..1e4d382963f4 100644
jz restore_all
call preempt_schedule_irq
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
-index d8b4baf18ec6..26c603fb0d87 100644
+index 24af36ed722c..774fe2f03c1c 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
-@@ -544,7 +544,23 @@ retint_kernel:
+@@ -541,7 +541,23 @@ retint_kernel:
bt $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
0: cmpl $0, PER_CPU_VAR(__preempt_count)
@@ -152,7 +152,7 @@ index ec1f3c651150..d9cd82a824a6 100644
#ifdef CONFIG_PREEMPT
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
-index 9fc44b95f7cb..8a3d741830c3 100644
+index e00e1bd6e7b3..a22b5e86eeed 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -54,11 +54,14 @@ struct task_struct;
@@ -181,23 +181,23 @@ index 9fc44b95f7cb..8a3d741830c3 100644
#endif
/*
-@@ -85,6 +92,7 @@ struct thread_info {
+@@ -82,6 +89,7 @@ struct thread_info {
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
+#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
#define TIF_UPROBE 12 /* breakpointed or singlestepping */
- #define TIF_NOCPUID 15 /* CPUID is not accessible in userland */
-@@ -109,6 +117,7 @@ struct thread_info {
+ #define TIF_PATCH_PENDING 13 /* pending live patching update */
+@@ -107,6 +115,7 @@ struct thread_info {
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE (1 << TIF_UPROBE)
- #define _TIF_NOCPUID (1 << TIF_NOCPUID)
-@@ -145,6 +154,8 @@ struct thread_info {
+ #define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
+@@ -146,6 +155,8 @@ struct thread_info {
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
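
Note on the _TIF_NEED_RESCHED_MASK lines in the refreshed common.c hunk: the exit-to-usermode flags keep testing the combined mask instead of _TIF_NEED_RESCHED alone. The mask's definition is not part of this diff; the sketch below is only an illustration of the flag arithmetic, assuming the usual preempt-lazy composition (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) and the mainline x86 value TIF_NEED_RESCHED == 3.

/*
 * Minimal userspace sketch of the flag arithmetic behind this patch.
 * TIF_NEED_RESCHED_LAZY == 9 comes from the thread_info.h hunk above;
 * the value of TIF_NEED_RESCHED and the composition of
 * _TIF_NEED_RESCHED_MASK are assumptions (they are defined elsewhere
 * in the preempt-lazy series, not in this diff).
 */
#include <stdio.h>

#define TIF_NEED_RESCHED        3   /* rescheduling necessary (assumed mainline value) */
#define TIF_NEED_RESCHED_LAZY   9   /* lazy rescheduling necessary (added by this patch) */

#define _TIF_NEED_RESCHED       (1u << TIF_NEED_RESCHED)
#define _TIF_NEED_RESCHED_LAZY  (1u << TIF_NEED_RESCHED_LAZY)

/* Assumed definition: the exit path now reacts to either flag. */
#define _TIF_NEED_RESCHED_MASK  (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)

int main(void)
{
	unsigned int flags = _TIF_NEED_RESCHED_LAZY;  /* e.g. set by a "lazy" wakeup */

	/* Old check: a lazy-only request would not enter the resched path. */
	printf("old check hit: %d\n", !!(flags & _TIF_NEED_RESCHED));
	/* New check: either flag sends us through exit_to_usermode_loop(). */
	printf("new check hit: %d\n", !!(flags & _TIF_NEED_RESCHED_MASK));
	return 0;
}

Built as an ordinary C program, the old test misses the lazy-only request while the widened mask catches it; that is the behavioural point of the hunks touching EXIT_TO_USERMODE_LOOP_FLAGS and thread_info.h.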