author    Paul Gortmaker <paul.gortmaker@windriver.com>  2016-12-26 13:46:06 -0500
committer Paul Gortmaker <paul.gortmaker@windriver.com>  2016-12-26 13:46:06 -0500
commit    5765246abe7e6dddceaaabadbd2b7d30cb57d79e (patch)
tree      921e151e4a3a2a9135440b5dd376d74599031160
parent    b52d07c64afc7d7a1919cbd8c64be59f60a39a73 (diff)
download  4.9-rt-patches-5765246abe7e6dddceaaabadbd2b7d30cb57d79e.tar.gz
Revert "rtmutex: unrefresh for 4.8.15 --> 4.8.0"
This reverts commit bf7ea5b9a8b167ef3f3fc8359333e6c7b117dd18.
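
For context: the refreshed copy of rtmutex-futex-prepare-rt.patch restored below routes the old direct checks of task->pi_blocked_on through rt_mutex_real_waiter(). Only the helper's prototype is visible in the hunks that follow, so the body and the PI_WAKEUP_INPROGRESS placeholder definition in this minimal sketch are assumptions about the rest of the patch, not part of this commit:

    struct rt_mutex_waiter;	/* opaque here; defined in kernel/locking/rtmutex_common.h */

    /* Assumed placeholder a requeue-PI waiter is parked on while its wakeup is in flight. */
    #define PI_WAKEUP_INPROGRESS	((struct rt_mutex_waiter *) 1)

    static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
    {
    	/* Treat the in-progress-wakeup marker as "no real waiter". */
    	return waiter && waiter != PI_WAKEUP_INPROGRESS;
    }
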
-rw-r--r--  patches/rtmutex-futex-prepare-rt.patch  |  48
1 files changed, 19 insertions, 29 deletions
diff --git a/patches/rtmutex-futex-prepare-rt.patch b/patches/rtmutex-futex-prepare-rt.patch
index 71583c5636cc9..e2887fa2adadf 100644
--- a/patches/rtmutex-futex-prepare-rt.patch
+++ b/patches/rtmutex-futex-prepare-rt.patch
@@ -1,7 +1,6 @@
-From b4498097109b0fa232869ad8b8e0593c6ea59766 Mon Sep 17 00:00:00 2001
+Subject: rtmutex: Handle the various new futex race conditions
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 10 Jun 2011 11:04:15 +0200
-Subject: [PATCH] rtmutex: Handle the various new futex race conditions
RT opens a few new interesting race conditions in the rtmutex/futex
combo due to futex hash bucket lock being a 'sleeping' spinlock and
@@ -9,16 +8,14 @@ therefor not disabling preemption.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- kernel/futex.c | 77 +++++++++++++++++++++++++++++++++--------
- kernel/locking/rtmutex.c | 36 +++++++++++++++----
- kernel/locking/rtmutex_common.h | 2 ++
+ kernel/futex.c | 77 ++++++++++++++++++++++++++++++++--------
+ kernel/locking/rtmutex.c | 36 +++++++++++++++---
+ kernel/locking/rtmutex_common.h | 2 +
3 files changed, 94 insertions(+), 21 deletions(-)
-diff --git a/kernel/futex.c b/kernel/futex.c
-index 46cb3a301bc1..76abbf8568b3 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1915,6 +1915,16 @@ retry_private:
+@@ -1915,6 +1915,16 @@ static int futex_requeue(u32 __user *uad
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
@@ -35,7 +32,7 @@ index 46cb3a301bc1..76abbf8568b3 100644
} else if (ret) {
/*
* rt_mutex_start_proxy_lock() detected a
-@@ -2805,7 +2815,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2805,7 +2815,7 @@ static int futex_wait_requeue_pi(u32 __u
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
struct rt_mutex *pi_mutex = NULL;
@@ -44,7 +41,7 @@ index 46cb3a301bc1..76abbf8568b3 100644
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -2864,20 +2874,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2864,20 +2874,55 @@ static int futex_wait_requeue_pi(u32 __u
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
@@ -111,7 +108,7 @@ index 46cb3a301bc1..76abbf8568b3 100644
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
-@@ -2886,14 +2931,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2886,14 +2931,15 @@ static int futex_wait_requeue_pi(u32 __u
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
@@ -129,7 +126,7 @@ index 46cb3a301bc1..76abbf8568b3 100644
}
} else {
/*
-@@ -2906,7 +2952,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2906,7 +2952,8 @@ static int futex_wait_requeue_pi(u32 __u
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
debug_rt_mutex_free_waiter(&rt_waiter);
@@ -139,12 +136,10 @@ index 46cb3a301bc1..76abbf8568b3 100644
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
-diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
-index dbbf116249e6..6e2fcc026a88 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -69,6 +69,11 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
- clear_rt_mutex_waiters(lock);
+@@ -133,6 +133,11 @@ static void fixup_rt_mutex_waiters(struc
+ WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}
+static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
@@ -155,7 +150,7 @@ index dbbf116249e6..6e2fcc026a88 100644
/*
* We can speed up the acquire/release, if there's no debugging state to be
* set up.
-@@ -357,7 +362,8 @@ int max_lock_depth = 1024;
+@@ -421,7 +426,8 @@ int max_lock_depth = 1024;
static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
@@ -165,7 +160,7 @@ index dbbf116249e6..6e2fcc026a88 100644
}
/*
-@@ -493,7 +499,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+@@ -557,7 +563,7 @@ static int rt_mutex_adjust_prio_chain(st
* reached or the state of the chain has changed while we
* dropped the locks.
*/
@@ -174,7 +169,7 @@ index dbbf116249e6..6e2fcc026a88 100644
goto out_unlock_pi;
/*
-@@ -907,6 +913,23 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+@@ -971,6 +977,23 @@ static int task_blocks_on_rt_mutex(struc
return -EDEADLK;
raw_spin_lock(&task->pi_lock);
@@ -198,7 +193,7 @@ index dbbf116249e6..6e2fcc026a88 100644
__rt_mutex_adjust_prio(task);
waiter->task = task;
waiter->lock = lock;
-@@ -930,7 +953,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+@@ -994,7 +1017,7 @@ static int task_blocks_on_rt_mutex(struc
rt_mutex_enqueue_pi(owner, waiter);
__rt_mutex_adjust_prio(owner);
@@ -207,7 +202,7 @@ index dbbf116249e6..6e2fcc026a88 100644
chain_walk = 1;
} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
chain_walk = 1;
-@@ -1014,7 +1037,7 @@ static void remove_waiter(struct rt_mutex *lock,
+@@ -1078,7 +1101,7 @@ static void remove_waiter(struct rt_mute
{
bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
@@ -216,7 +211,7 @@ index dbbf116249e6..6e2fcc026a88 100644
raw_spin_lock(&current->pi_lock);
rt_mutex_dequeue(lock, waiter);
-@@ -1038,7 +1061,8 @@ static void remove_waiter(struct rt_mutex *lock,
+@@ -1102,7 +1125,8 @@ static void remove_waiter(struct rt_mute
__rt_mutex_adjust_prio(owner);
/* Store the lock on which owner is blocked or NULL */
@@ -226,7 +221,7 @@ index dbbf116249e6..6e2fcc026a88 100644
raw_spin_unlock(&owner->pi_lock);
-@@ -1074,7 +1098,7 @@ void rt_mutex_adjust_pi(struct task_struct *task)
+@@ -1138,7 +1162,7 @@ void rt_mutex_adjust_pi(struct task_stru
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
@@ -235,11 +230,9 @@ index dbbf116249e6..6e2fcc026a88 100644
!dl_prio(task->prio))) {
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
-diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
-index 4f5f83c7d2d3..1c1bcc5cbd69 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -97,6 +97,8 @@ enum rtmutex_chainwalk {
+@@ -98,6 +98,8 @@ enum rtmutex_chainwalk {
/*
* PI-futex support (proxy locking functions, etc.):
*/
@@ -248,6 +241,3 @@ index 4f5f83c7d2d3..1c1bcc5cbd69 100644
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner);
---
-2.10.1
-