author     Paul Gortmaker <paul.gortmaker@windriver.com>  2016-09-05 16:04:34 -0400
committer  Paul Gortmaker <paul.gortmaker@windriver.com>  2016-09-05 16:04:34 -0400
commit     715fb6c6786b28dfa8548a7f3af3241ec94a472f (patch)
tree       5a4da2d7bff41fdd92b6403548b3bc23bc2ba7e9
parent     13ea66437ffdad61997eff27f65ce586f5aa725a (diff)
download   4.8-rt-patches-715fb6c6786b28dfa8548a7f3af3241ec94a472f.tar.gz
sched: basic refresh for lazy patch (tag: rt-v4.8-rc2)
-rw-r--r--  patches/preempt-lazy-support.patch  26
1 file changed, 13 insertions(+), 13 deletions(-)
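
The patch being refreshed below carries Thomas Gleixner's lazy preemption support. The idea: a wakeup of a SCHED_OTHER task only sets a deferred TIF_NEED_RESCHED_LAZY flag, while real-time tasks still force an immediate TIF_NEED_RESCHED; a per-task lazy counter (bumped around sections such as migrate_disable()/migrate_enable() in the hunks below) holds off even the lazy request until it drops back to zero. As a rough, self-contained model of that flag logic (userspace stand-ins, not the kernel implementation; names like preemption_point() are invented for illustration):

/* lazy.c -- toy model of the lazy-preemption flags; build: cc -o lazy lazy.c */
#include <stdbool.h>
#include <stdio.h>

static bool tif_need_resched;       /* TIF_NEED_RESCHED: reschedule now */
static bool tif_need_resched_lazy;  /* TIF_NEED_RESCHED_LAZY: reschedule at leisure */
static int  preempt_lazy_count;     /* > 0: lazy requests are held back */

static void schedule(void) { puts("-> schedule()"); }

/* Model of resched_curr_lazy(): a SCHED_OTHER wakeup only marks the
 * running task lazily, so it keeps the CPU until a natural preemption
 * point instead of being kicked off immediately. */
static void resched_curr_lazy(void) { tif_need_resched_lazy = true; }

/* Model of a preemption point: a hard request is always honored, a
 * lazy one only once the lazy counter has returned to zero. */
static void preemption_point(void)
{
	if (tif_need_resched ||
	    (tif_need_resched_lazy && preempt_lazy_count == 0))
		schedule();
}

int main(void)
{
	preempt_lazy_count++;    /* e.g. entering migrate_disable() */
	resched_curr_lazy();     /* a SCHED_OTHER peer wants the CPU */
	preemption_point();      /* deferred: nothing happens yet */
	preempt_lazy_count--;    /* e.g. leaving migrate_enable() */
	preemption_point();      /* now the deferred request fires */
	return 0;
}
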
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 1ad678c3570389..d82e5adda05830 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -1,4 +1,4 @@
-From b6eeb836b34c92fbd882f541a4116fe20ae5e884 Mon Sep 17 00:00:00 2001
+From 68e702f5b65e913496de185f4a08feff08e58429 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 26 Oct 2012 18:50:54 +0100
Subject: [PATCH] sched: Add support for lazy preemption
@@ -202,7 +202,7 @@ index 22f008147701..d6648d0105b1 100644
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
-index 352b1542f5cc..436e3e745248 100644
+index cbd8990e2e77..c96328cef351 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -103,7 +103,17 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
@@ -222,8 +222,8 @@ index 352b1542f5cc..436e3e745248 100644
+#define tif_need_resched_lazy() 0
+#endif
- #endif /* __KERNEL__ */
-
+ #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+ static inline int arch_within_stack_frames(const void * const stack,
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 28aa5381878e..15154b13a53b 100644
--- a/include/linux/trace_events.h
@@ -254,10 +254,10 @@ index f8a2982bdbde..11dbe26a8279 100644
prompt "Preemption Model"
default PREEMPT_NONE
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 8508acf8dec9..aa1fd5970cbc 100644
+index aa01fcbb8829..167bef5dd8a6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -509,6 +509,38 @@ void resched_curr(struct rq *rq)
+@@ -510,6 +510,38 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -296,7 +296,7 @@ index 8508acf8dec9..aa1fd5970cbc 100644
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2499,6 +2531,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
+@@ -2500,6 +2532,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -306,7 +306,7 @@ index 8508acf8dec9..aa1fd5970cbc 100644
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3315,6 +3350,7 @@ void migrate_disable(void)
+@@ -3334,6 +3369,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -314,7 +314,7 @@ index 8508acf8dec9..aa1fd5970cbc 100644
pin_current_cpu();
p->migrate_disable = 1;
preempt_enable();
-@@ -3354,6 +3390,7 @@ void migrate_enable(void)
+@@ -3373,6 +3409,7 @@ void migrate_enable(void)
unpin_current_cpu();
preempt_enable();
@@ -322,7 +322,7 @@ index 8508acf8dec9..aa1fd5970cbc 100644
}
EXPORT_SYMBOL(migrate_enable);
#endif
-@@ -3494,6 +3531,7 @@ static void __sched notrace __schedule(bool preempt)
+@@ -3513,6 +3550,7 @@ static void __sched notrace __schedule(bool preempt)
next = pick_next_task(rq, prev, cookie);
clear_tsk_need_resched(prev);
@@ -330,7 +330,7 @@ index 8508acf8dec9..aa1fd5970cbc 100644
clear_preempt_need_resched();
rq->clock_skip_update = 0;
-@@ -3654,6 +3692,14 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
+@@ -3673,6 +3711,14 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
if (likely(!preemptible()))
return;
@@ -345,7 +345,7 @@ index 8508acf8dec9..aa1fd5970cbc 100644
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -5417,7 +5463,9 @@ void init_idle(struct task_struct *idle, int cpu)
+@@ -5436,7 +5482,9 @@ void init_idle(struct task_struct *idle, int cpu)
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -357,7 +357,7 @@ index 8508acf8dec9..aa1fd5970cbc 100644
* The idle tasks have their own, simple scheduling class:
*/
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 4088eedea763..3838f05d54b4 100644
+index 039de34f1521..678399d2f8bd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3486,7 +3486,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)