author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2016-07-12 13:10:15 +0200
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2016-07-12 13:10:15 +0200
commit     2794710f8319e935ca6b4e3e753a8267bf8ce844
tree       b57155d1316a99de67dd4f395225f24f9a024ffa
parent     5d2c597266888703b5212abb1ce43b58c4a7b641
download   4.9-rt-patches-2794710f8319e935ca6b4e3e753a8267bf8ce844.tar.gz
[ANNOUNCE] 4.6.4-rt6
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
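
As a quick sanity check, the refreshed queue should apply cleanly on top of
v4.6.4. A minimal sketch using quilt (the tree layout and paths below are
illustrative, not part of this commit):

    # Assumes a vanilla linux-4.6.4 tree next to a checkout of this queue;
    # quilt expects to find patches/series under the symlinked directory.
    tar xf linux-4.6.4.tar.xz
    cd linux-4.6.4
    ln -s ../4.9-rt-patches/patches patches
    quilt push -a    # every hunk should now apply at the refreshed offsets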
-rw-r--r--  patches/completion-use-simple-wait-queues.patch                        4
-rw-r--r--  patches/cond-resched-softirq-rt.patch                                  4
-rw-r--r--  patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch     2
-rw-r--r--  patches/introduce_migrate_disable_cpu_light.patch                      2
-rw-r--r--  patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch     4
-rw-r--r--  patches/localversion.patch                                             2
-rw-r--r--  patches/mm-memcontrol-do_not_disable_irq.patch                        10
-rw-r--r--  patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch    2
-rw-r--r--  patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch         6
-rw-r--r--  patches/preempt-lazy-check-preempt_schedule.patch                      6
-rw-r--r--  patches/preempt-lazy-support.patch                                    10
-rw-r--r--  patches/sched-might-sleep-do-not-account-rcu-depth.patch               2
-rw-r--r--  patches/sched-mmdrop-delayed.patch                                     6
-rw-r--r--  patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch     2
-rw-r--r--  patches/tracing-account-for-preempt-off-in-preempt_schedule.patch      2
-rw-r--r--  patches/workqueue-distangle-from-rq-lock.patch                         8
-rw-r--r--  patches/workqueue-prevent-deadlock-stall.patch                         4
-rw-r--r--  patches/x86-kvm-require-const-tsc-for-rt.patch                         2
18 files changed, 39 insertions(+), 39 deletions(-)
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index ace719e619dc6b..10c03f93f7ef8a 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -286,7 +286,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(completion_done);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3141,7 +3141,10 @@ void migrate_disable(void)
+@@ -3142,7 +3142,10 @@ void migrate_disable(void)
}
#ifdef CONFIG_SCHED_DEBUG
@@ -298,7 +298,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
if (p->migrate_disable) {
-@@ -3168,7 +3171,10 @@ void migrate_enable(void)
+@@ -3169,7 +3172,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
diff --git a/patches/cond-resched-softirq-rt.patch b/patches/cond-resched-softirq-rt.patch
index c2e160c596f3de..aa2581b244d3a9 100644
--- a/patches/cond-resched-softirq-rt.patch
+++ b/patches/cond-resched-softirq-rt.patch
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4812,6 +4812,7 @@ int __cond_resched_lock(spinlock_t *lock
+@@ -4813,6 +4813,7 @@ int __cond_resched_lock(spinlock_t *lock
}
EXPORT_SYMBOL(__cond_resched_lock);
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
-@@ -4825,6 +4826,7 @@ int __sched __cond_resched_softirq(void)
+@@ -4826,6 +4827,7 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
diff --git a/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch b/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
index b0ea81dcb908a8..263c1c0fda88ba 100644
--- a/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
+++ b/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -11475,7 +11475,7 @@ void intel_check_page_flip(struct drm_de
+@@ -11476,7 +11476,7 @@ void intel_check_page_flip(struct drm_de
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
diff --git a/patches/introduce_migrate_disable_cpu_light.patch b/patches/introduce_migrate_disable_cpu_light.patch
index b5816a3b2b13e8..880f244e7f3375 100644
--- a/patches/introduce_migrate_disable_cpu_light.patch
+++ b/patches/introduce_migrate_disable_cpu_light.patch
@@ -171,7 +171,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto out;
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-@@ -3061,6 +3066,69 @@ static inline void schedule_debug(struct
+@@ -3062,6 +3067,69 @@ static inline void schedule_debug(struct
schedstat_inc(this_rq(), sched_count);
}
diff --git a/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch b/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
index 5ddcd9a719b110..49b05de6ca2ade 100644
--- a/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
+++ b/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3117,7 +3117,7 @@ void migrate_disable(void)
+@@ -3118,7 +3118,7 @@ void migrate_disable(void)
{
struct task_struct *p = current;
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic++;
#endif
-@@ -3144,7 +3144,7 @@ void migrate_enable(void)
+@@ -3145,7 +3145,7 @@ void migrate_enable(void)
{
struct task_struct *p = current;
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 72cdd2b3c76008..4c1841b6475d12 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt5
++-rt6
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index 3d1e94ce4d3c62..44afca7012f65f 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
-@@ -4484,12 +4487,12 @@ static int mem_cgroup_move_account(struc
+@@ -4487,12 +4490,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
unlock_page(page);
out:
-@@ -5339,10 +5342,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -5342,10 +5345,10 @@ void mem_cgroup_commit_charge(struct pag
commit_charge(page, memcg, lrucare);
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (do_memsw_account() && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -5394,14 +5397,14 @@ static void uncharge_batch(struct mem_cg
+@@ -5397,14 +5400,14 @@ static void uncharge_batch(struct mem_cg
memcg_oom_recover(memcg);
}
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!mem_cgroup_is_root(memcg))
css_put_many(&memcg->css, nr_pages);
-@@ -5719,6 +5722,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5722,6 +5725,7 @@ void mem_cgroup_swapout(struct page *pag
{
struct mem_cgroup *memcg;
unsigned short oldid;
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -5747,9 +5751,13 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5750,9 +5754,13 @@ void mem_cgroup_swapout(struct page *pag
* important here to have the interrupts disabled because it is the
* only synchronisation we have for udpating the per-CPU variables.
*/
diff --git a/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch b/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
index e3ff1f47d19a4a..8668cc78e9cf24 100644
--- a/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
+++ b/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -5554,10 +5554,10 @@ void mem_cgroup_migrate(struct page *old
+@@ -5557,10 +5557,10 @@ void mem_cgroup_migrate(struct page *old
commit_charge(newpage, memcg, false);
diff --git a/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch b/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
index 6cdbb866a99690..2738349ca2c2bf 100644
--- a/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
+++ b/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <uapi/linux/netfilter/x_tables.h>
/**
-@@ -285,6 +286,8 @@ void xt_free_table_info(struct xt_table_
+@@ -292,6 +293,8 @@ void xt_free_table_info(struct xt_table_
*/
DECLARE_PER_CPU(seqcount_t, xt_recseq);
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* xt_tee_enabled - true if x_tables needs to handle reentrancy
*
* Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
-@@ -305,6 +308,9 @@ static inline unsigned int xt_write_recs
+@@ -312,6 +315,9 @@ static inline unsigned int xt_write_recs
{
unsigned int addend;
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Low order bit of sequence is set if we already
* called xt_write_recseq_begin().
-@@ -335,6 +341,7 @@ static inline void xt_write_recseq_end(u
+@@ -342,6 +348,7 @@ static inline void xt_write_recseq_end(u
/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
smp_wmb();
__this_cpu_add(xt_recseq.sequence, addend);
diff --git a/patches/preempt-lazy-check-preempt_schedule.patch b/patches/preempt-lazy-check-preempt_schedule.patch
index 7d88d204c49cd0..b1fe601e93bcd8 100644
--- a/patches/preempt-lazy-check-preempt_schedule.patch
+++ b/patches/preempt-lazy-check-preempt_schedule.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3500,6 +3500,30 @@ static void __sched notrace preempt_sche
+@@ -3501,6 +3501,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
-@@ -3514,6 +3538,8 @@ asmlinkage __visible void __sched notrac
+@@ -3515,6 +3539,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_schedule_common();
}
-@@ -3540,15 +3566,9 @@ asmlinkage __visible void __sched notrac
+@@ -3541,15 +3567,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 092eabb2027efb..95e1ab220a8cd9 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -306,7 +306,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3180,6 +3215,7 @@ void migrate_disable(void)
+@@ -3181,6 +3216,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -314,7 +314,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pin_current_cpu();
p->migrate_disable = 1;
preempt_enable();
-@@ -3219,6 +3255,7 @@ void migrate_enable(void)
+@@ -3220,6 +3256,7 @@ void migrate_enable(void)
unpin_current_cpu();
preempt_enable();
@@ -322,7 +322,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(migrate_enable);
#endif
-@@ -3358,6 +3395,7 @@ static void __sched notrace __schedule(b
+@@ -3359,6 +3396,7 @@ static void __sched notrace __schedule(b
next = pick_next_task(rq, prev);
clear_tsk_need_resched(prev);
@@ -330,7 +330,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
rq->clock_skip_update = 0;
-@@ -3503,6 +3541,14 @@ asmlinkage __visible void __sched notrac
+@@ -3504,6 +3542,14 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -345,7 +345,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
preempt_disable_notrace();
/*
-@@ -5246,7 +5292,9 @@ void init_idle(struct task_struct *idle,
+@@ -5247,7 +5293,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index e61553c194a7aa..1380590e5008eb 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7534,7 +7534,7 @@ void __init sched_init(void)
+@@ -7535,7 +7535,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index c3561909065565..265e408de1a55d 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
-@@ -5299,6 +5303,8 @@ void sched_setnuma(struct task_struct *p
+@@ -5300,6 +5304,8 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
@@ -107,7 +107,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Ensures that the idle task is using init_mm right before its cpu goes
* offline.
-@@ -5313,7 +5319,11 @@ void idle_task_exit(void)
+@@ -5314,7 +5320,11 @@ void idle_task_exit(void)
switch_mm(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
@@ -120,7 +120,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -5509,6 +5519,10 @@ migration_call(struct notifier_block *nf
+@@ -5510,6 +5520,10 @@ migration_call(struct notifier_block *nf
case CPU_DEAD:
calc_load_migrate(rq);
diff --git a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
index a673c48f44ee16..c921f4a01a90ff 100644
--- a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
+++ b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3305,8 +3305,10 @@ static void __sched notrace __schedule(b
+@@ -3306,8 +3306,10 @@ static void __sched notrace __schedule(b
* If a worker went to sleep, notify and ask workqueue
* whether it wants to wake up a task to maintain
* concurrency.
diff --git a/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch b/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
index 88cef85cdc117f..baa26ea6eb2146 100644
--- a/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
+++ b/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3347,7 +3347,16 @@ asmlinkage __visible void __sched notrac
+@@ -3348,7 +3348,16 @@ asmlinkage __visible void __sched notrac
* an infinite recursion.
*/
prev_ctx = exception_enter();
diff --git a/patches/workqueue-distangle-from-rq-lock.patch b/patches/workqueue-distangle-from-rq-lock.patch
index 80c6388ba2f565..851783abab28fe 100644
--- a/patches/workqueue-distangle-from-rq-lock.patch
+++ b/patches/workqueue-distangle-from-rq-lock.patch
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
-@@ -3322,21 +3271,6 @@ static void __sched notrace __schedule(b
+@@ -3323,21 +3272,6 @@ static void __sched notrace __schedule(b
} else {
deactivate_task(rq, prev, DEQUEUE_SLEEP);
prev->on_rq = 0;
@@ -118,7 +118,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
switch_count = &prev->nvcsw;
}
-@@ -3369,6 +3303,14 @@ static inline void sched_submit_work(str
+@@ -3370,6 +3304,14 @@ static inline void sched_submit_work(str
{
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -3377,6 +3319,12 @@ static inline void sched_submit_work(str
+@@ -3378,6 +3320,12 @@ static inline void sched_submit_work(str
blk_schedule_flush_plug(tsk);
}
@@ -146,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
-@@ -3387,6 +3335,7 @@ asmlinkage __visible void __sched schedu
+@@ -3388,6 +3336,7 @@ asmlinkage __visible void __sched schedu
__schedule(false);
sched_preempt_enable_no_resched();
} while (need_resched());
diff --git a/patches/workqueue-prevent-deadlock-stall.patch b/patches/workqueue-prevent-deadlock-stall.patch
index 59e5d64ab78704..0ab3ad3d373b49 100644
--- a/patches/workqueue-prevent-deadlock-stall.patch
+++ b/patches/workqueue-prevent-deadlock-stall.patch
@@ -43,7 +43,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3417,9 +3417,8 @@ STACK_FRAME_NON_STANDARD(__schedule); /*
+@@ -3418,9 +3418,8 @@ STACK_FRAME_NON_STANDARD(__schedule); /*
static inline void sched_submit_work(struct task_struct *tsk)
{
@@ -54,7 +54,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
/*
* If a worker went to sleep, notify and ask workqueue whether
* it wants to wake up a task to maintain concurrency.
-@@ -3427,6 +3426,10 @@ static inline void sched_submit_work(str
+@@ -3428,6 +3427,10 @@ static inline void sched_submit_work(str
if (tsk->flags & PF_WQ_WORKER)
wq_worker_sleeping(tsk);
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index 71a343418f83ea..792681bd01f487 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -5850,6 +5850,13 @@ int kvm_arch_init(void *opaque)
+@@ -5855,6 +5855,13 @@ int kvm_arch_init(void *opaque)
goto out;
}