author    Paul Gortmaker <paul.gortmaker@windriver.com>    2016-08-26 16:01:45 -0400
committer Paul Gortmaker <paul.gortmaker@windriver.com>    2016-08-26 16:01:45 -0400
commit    a337b6acd4453e492809005ff2e31e34cc1f263a (patch)
tree      73770d26c6af754620e8238a8aedf452372f359f
parent    e576a44f3ec236c3d6890a7464079928030c1231 (diff)
download  4.8-rt-patches-a337b6acd4453e492809005ff2e31e34cc1f263a.tar.gz
cookie context refresh
-rw-r--r--  patches/preempt-lazy-support.patch | 103
1 file changed, 59 insertions(+), 44 deletions(-)
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 020e2379c4da1c..6593ca1bfdbd7d 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -1,6 +1,7 @@
-Subject: sched: Add support for lazy preemption
+From 2830ac728df177160cf24146ace12f62280a0a13 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 26 Oct 2012 18:50:54 +0100
+Subject: [PATCH] sched: Add support for lazy preemption
It has become an obsession to mitigate the determinism vs. throughput
loss of RT. Looking at the mainline semantics of preemption points
@@ -51,25 +52,12 @@ there is a clear trend that it enhances the non RT workload
performance.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- arch/x86/include/asm/preempt.h | 18 +++++++++++++-
- include/linux/preempt.h | 29 ++++++++++++++++++++++-
- include/linux/sched.h | 37 ++++++++++++++++++++++++++++++
- include/linux/thread_info.h | 12 +++++++++
- include/linux/trace_events.h | 1
- kernel/Kconfig.preempt | 6 ++++
- kernel/sched/core.c | 50 ++++++++++++++++++++++++++++++++++++++++-
- kernel/sched/fair.c | 16 ++++++-------
- kernel/sched/features.h | 3 ++
- kernel/sched/sched.h | 9 +++++++
- kernel/trace/trace.c | 37 ++++++++++++++++++------------
- kernel/trace/trace.h | 2 +
- kernel/trace/trace_output.c | 14 +++++++++--
- 13 files changed, 205 insertions(+), 29 deletions(-)
+diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
+index d397deb58146..190af4271b5c 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
-@@ -79,17 +79,33 @@ static __always_inline void __preempt_co
+@@ -79,17 +79,33 @@ static __always_inline void __preempt_count_sub(int val)
* a decrement which hits zero means we have no preempt_count and should
* reschedule.
*/
@@ -104,6 +92,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#ifdef CONFIG_PREEMPT
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 5f733773f54e..1cfb1cb72354 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -153,6 +153,20 @@ extern void preempt_count_sub(int val);
@@ -163,9 +153,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
set_preempt_need_resched(); \
} while (0)
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 0465fce1d8e5..ae44cf400535 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -3009,6 +3009,43 @@ static inline int test_tsk_need_resched(
+@@ -3115,6 +3115,43 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -209,9 +201,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
+diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
+index b4c2a485b28a..5580c2c8410d 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
-@@ -103,7 +103,17 @@ static inline int test_ti_thread_flag(st
+@@ -103,7 +103,17 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
@@ -230,6 +224,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
/*
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index 247acf066c1a..682bfdf20f16 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -58,6 +58,7 @@ struct trace_entry {
@@ -240,6 +236,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
};
#define TRACE_EVENT_TYPE_MAX \
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index f8a2982bdbde..11dbe26a8279 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -6,6 +6,12 @@ config PREEMPT_RT_BASE
@@ -255,9 +253,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
choice
prompt "Preemption Model"
default PREEMPT_NONE
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 51ca30386b64..ec1fd53d3b11 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -475,6 +475,38 @@ void resched_curr(struct rq *rq)
+@@ -509,6 +509,38 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -296,7 +296,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2392,6 +2424,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2474,6 +2506,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -306,7 +306,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3181,6 +3216,7 @@ void migrate_disable(void)
+@@ -3285,6 +3320,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -314,7 +314,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pin_current_cpu();
p->migrate_disable = 1;
preempt_enable();
-@@ -3220,6 +3256,7 @@ void migrate_enable(void)
+@@ -3324,6 +3360,7 @@ void migrate_enable(void)
unpin_current_cpu();
preempt_enable();
@@ -322,15 +322,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(migrate_enable);
#endif
-@@ -3359,6 +3396,7 @@ static void __sched notrace __schedule(b
+@@ -3464,6 +3501,7 @@ static void __sched notrace __schedule(bool preempt)
- next = pick_next_task(rq, prev);
+ next = pick_next_task(rq, prev, cookie);
clear_tsk_need_resched(prev);
+ clear_tsk_need_resched_lazy(prev);
clear_preempt_need_resched();
rq->clock_skip_update = 0;
-@@ -3504,6 +3542,14 @@ asmlinkage __visible void __sched notrac
+@@ -3624,6 +3662,14 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
if (likely(!preemptible()))
return;
@@ -343,9 +343,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ return;
+#endif
do {
- preempt_disable_notrace();
/*
-@@ -5249,7 +5295,9 @@ void init_idle(struct task_struct *idle,
+ * Because the function tracer can trace preempt_count_sub()
+@@ -5384,7 +5430,9 @@ void init_idle(struct task_struct *idle, int cpu)
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -356,9 +356,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The idle tasks have their own, simple scheduling class:
*/
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 218f8e83db73..d8cd3a5038f9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -3333,7 +3333,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3428,7 +3428,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
@@ -367,7 +369,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The current task ran long enough, ensure it doesn't get
* re-elected due to buddy favours.
-@@ -3357,7 +3357,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3452,7 +3452,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
return;
if (delta > ideal_runtime)
@@ -376,7 +378,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void
-@@ -3502,7 +3502,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -3597,7 +3597,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
* validating it and just reschedule.
*/
if (queued) {
@@ -385,7 +387,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
/*
-@@ -3684,7 +3684,7 @@ static void __account_cfs_rq_runtime(str
+@@ -3779,7 +3779,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -394,7 +396,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static __always_inline
-@@ -4296,7 +4296,7 @@ static void hrtick_start_fair(struct rq
+@@ -4391,7 +4391,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
if (delta < 0) {
if (rq->curr == p)
@@ -403,7 +405,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -5441,7 +5441,7 @@ static void check_preempt_wakeup(struct
+@@ -5576,7 +5576,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
return;
preempt:
@@ -412,7 +414,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -8192,7 +8192,7 @@ static void task_fork_fair(struct task_s
+@@ -8316,7 +8316,7 @@ static void task_fork_fair(struct task_struct *p)
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -421,7 +423,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -8217,7 +8217,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -8341,7 +8341,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
*/
if (rq->curr == p) {
if (p->prio > oldprio)
@@ -430,6 +432,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else
check_preempt_curr(rq, p, 0);
}
+diff --git a/kernel/sched/features.h b/kernel/sched/features.h
+index 11258a0feae7..6d28fcd08872 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -47,6 +47,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
@@ -442,9 +446,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else
/*
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index f02e7cc09111..5559d4e5e98e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1304,6 +1304,15 @@ extern void init_sched_fair_class(void);
+@@ -1313,6 +1313,15 @@ extern void init_sched_fair_class(void);
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
@@ -460,9 +466,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 448b92794c31..3ffea48d795c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -1657,6 +1657,7 @@ tracing_generic_entry_update(struct trac
+@@ -1657,6 +1657,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
@@ -470,7 +478,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
entry->pid = (tsk) ? tsk->pid : 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-@@ -1667,7 +1668,8 @@ tracing_generic_entry_update(struct trac
+@@ -1667,7 +1668,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
@@ -480,7 +488,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
-@@ -2563,15 +2565,17 @@ get_total_entries(struct trace_buffer *b
+@@ -2563,15 +2565,17 @@ get_total_entries(struct trace_buffer *buf,
static void print_lat_help_header(struct seq_file *m)
{
@@ -507,7 +515,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -2597,11 +2601,14 @@ static void print_func_help_header_irq(s
+@@ -2597,11 +2601,14 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
print_event_info(buf, m);
seq_puts(m, "# _-----=> irqs-off\n"
"# / _----=> need-resched\n"
@@ -527,6 +535,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 3fff4adfd431..acb00bc2b0e0 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -117,6 +117,7 @@ struct kretprobe_trace_entry_head {
@@ -545,9 +555,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
};
#define TRACE_BUF_SIZE 1024
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index c86764255a8c..455a7464772f 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
-@@ -386,6 +386,7 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -386,6 +386,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
char hardsoft_irq;
char need_resched;
@@ -555,7 +567,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
char irqs_off;
int hardirq;
int softirq;
-@@ -416,6 +417,9 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -416,6 +417,9 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
break;
}
@@ -565,7 +577,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
hardsoft_irq =
(nmi && hardirq) ? 'Z' :
nmi ? 'z' :
-@@ -424,14 +428,20 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -424,14 +428,20 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
softirq ? 's' :
'.' ;
@@ -588,3 +600,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (entry->migrate_disable)
trace_seq_printf(s, "%x", entry->migrate_disable);
else
+--
+2.5.0
+
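
For orientation, the mechanism carried by the refreshed patch is, roughly: preemption requests on behalf of SCHED_OTHER tasks set a separate TIF_NEED_RESCHED_LAZY flag instead of TIF_NEED_RESCHED, and that lazy flag is only acted on once the per-task preempt_lazy_count has dropped back to zero, while RT tasks keep the immediate-reschedule behaviour. The snippet below is a minimal, self-contained userspace model of that decision, written purely for illustration; the field and function names loosely mirror the patch, but the bodies are assumptions made for the sake of a compilable example and are NOT the kernel code being patched.

/*
 * Illustrative sketch only -- not the code added by
 * preempt-lazy-support.patch, just a tiny model of the decision
 * it implements.
 */
#include <stdbool.h>
#include <stdio.h>

struct task {
	bool need_resched;       /* models TIF_NEED_RESCHED                */
	bool need_resched_lazy;  /* models TIF_NEED_RESCHED_LAZY           */
	int  preempt_lazy_count; /* models the per-task lazy-disable depth */
};

/* A fair (SCHED_OTHER) preemption request only sets the lazy flag. */
static void resched_lazy(struct task *curr)
{
	curr->need_resched_lazy = true;
}

/* An RT preemption request keeps the original, immediate semantics. */
static void resched_now(struct task *curr)
{
	curr->need_resched = true;
}

/*
 * What a preemption point checks: the hard flag always wins, the lazy
 * flag only fires once the lazy-disable depth is back at zero.
 */
static bool should_resched(const struct task *curr)
{
	if (curr->need_resched)
		return true;
	return curr->need_resched_lazy && curr->preempt_lazy_count == 0;
}

int main(void)
{
	struct task t = { .preempt_lazy_count = 1 };

	resched_lazy(&t);          /* SCHED_OTHER wakeup while lazy-disabled */
	printf("inside lazy section: resched=%d\n", should_resched(&t));

	t.preempt_lazy_count = 0;  /* leaving the lazy-disabled region */
	printf("at preempt point:    resched=%d\n", should_resched(&t));

	resched_now(&t);           /* an RT wakeup is never deferred */
	printf("after RT wakeup:     resched=%d\n", should_resched(&t));
	return 0;
}

Under these assumptions, a throughput-oriented fair task can finish its current slice-bounded section before rescheduling, whereas an RT wakeup still preempts at once, which is the determinism-vs-throughput trade-off the changelog describes.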