author     Paul Gortmaker <paul.gortmaker@windriver.com>  2016-12-26 11:57:31 -0500
committer  Paul Gortmaker <paul.gortmaker@windriver.com>  2016-12-26 11:57:31 -0500
commit     2723fee9a7c5d2976717f6862d11c045dad090cc (patch)
tree       9f7756bd1d941c3320bfe864707d504902bf772c
parent     fabb76afd0b303b6b57a5d49af979134f6d222bd (diff)
download   4.9-rt-patches-2723fee9a7c5d2976717f6862d11c045dad090cc.tar.gz
rwsem: import new patch for rc2+
-rw-r--r--  patches/peterz-percpu-rwsem-rt.patch  218
-rw-r--r--  patches/series                          1
2 files changed, 219 insertions, 0 deletions
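
The imported patch (Peter Zijlstra's percpu-rwsem change, quoted in full below) converts every fs/locks.c reader back from the preempt-disabled percpu-rwsem variants to the plain API. As a minimal illustrative sketch only, not part of the commit, each call site changes along these lines; the pain on PREEMPT_RT comes from spin_lock() being a sleeping lock there, which must not be taken inside a preempt-disabled reader section:

	/* Before: the reader section runs with preemption disabled, so on
	 * PREEMPT_RT the sleeping spin_lock() inside it cannot be taken.
	 */
	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/* ... walk or modify the inode's lock lists ... */
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);

	/* After: preemption stays enabled across the whole reader section. */
	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/* ... */
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
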
diff --git a/patches/peterz-percpu-rwsem-rt.patch b/patches/peterz-percpu-rwsem-rt.patch
new file mode 100644
index 00000000000000..ec31b61aaff7cc
--- /dev/null
+++ b/patches/peterz-percpu-rwsem-rt.patch
@@ -0,0 +1,218 @@
+Subject: locking/percpu-rwsem: Remove preempt_disable variants
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed Nov 23 16:29:32 CET 2016
+
+Effective revert of commit:
+
+ 87709e28dc7c ("fs/locks: Use percpu_down_read_preempt_disable()")
+
+This is causing major pain for PREEMPT_RT and is only a very small
+performance issue for PREEMPT=y.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+---
+ fs/locks.c | 32 ++++++++++++++++----------------
+ include/linux/percpu-rwsem.h | 24 ++++--------------------
+ 2 files changed, 20 insertions(+), 36 deletions(-)
+
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -935,7 +935,7 @@ static int flock_lock_inode(struct inode
+ return -ENOMEM;
+ }
+
+- percpu_down_read_preempt_disable(&file_rwsem);
++ percpu_down_read(&file_rwsem);
+ spin_lock(&ctx->flc_lock);
+ if (request->fl_flags & FL_ACCESS)
+ goto find_conflict;
+@@ -976,7 +976,7 @@ static int flock_lock_inode(struct inode
+
+ out:
+ spin_unlock(&ctx->flc_lock);
+- percpu_up_read_preempt_enable(&file_rwsem);
++ percpu_up_read(&file_rwsem);
+ if (new_fl)
+ locks_free_lock(new_fl);
+ locks_dispose_list(&dispose);
+@@ -1013,7 +1013,7 @@ static int posix_lock_inode(struct inode
+ new_fl2 = locks_alloc_lock();
+ }
+
+- percpu_down_read_preempt_disable(&file_rwsem);
++ percpu_down_read(&file_rwsem);
+ spin_lock(&ctx->flc_lock);
+ /*
+ * New lock request. Walk all POSIX locks and look for conflicts. If
+@@ -1185,7 +1185,7 @@ static int posix_lock_inode(struct inode
+ }
+ out:
+ spin_unlock(&ctx->flc_lock);
+- percpu_up_read_preempt_enable(&file_rwsem);
++ percpu_up_read(&file_rwsem);
+ /*
+ * Free any unused locks.
+ */
+@@ -1460,7 +1460,7 @@ int __break_lease(struct inode *inode, u
+ return error;
+ }
+
+- percpu_down_read_preempt_disable(&file_rwsem);
++ percpu_down_read(&file_rwsem);
+ spin_lock(&ctx->flc_lock);
+
+ time_out_leases(inode, &dispose);
+@@ -1512,13 +1512,13 @@ int __break_lease(struct inode *inode, u
+ locks_insert_block(fl, new_fl);
+ trace_break_lease_block(inode, new_fl);
+ spin_unlock(&ctx->flc_lock);
+- percpu_up_read_preempt_enable(&file_rwsem);
++ percpu_up_read(&file_rwsem);
+
+ locks_dispose_list(&dispose);
+ error = wait_event_interruptible_timeout(new_fl->fl_wait,
+ !new_fl->fl_next, break_time);
+
+- percpu_down_read_preempt_disable(&file_rwsem);
++ percpu_down_read(&file_rwsem);
+ spin_lock(&ctx->flc_lock);
+ trace_break_lease_unblock(inode, new_fl);
+ locks_delete_block(new_fl);
+@@ -1535,7 +1535,7 @@ int __break_lease(struct inode *inode, u
+ }
+ out:
+ spin_unlock(&ctx->flc_lock);
+- percpu_up_read_preempt_enable(&file_rwsem);
++ percpu_up_read(&file_rwsem);
+ locks_dispose_list(&dispose);
+ locks_free_lock(new_fl);
+ return error;
+@@ -1609,7 +1609,7 @@ int fcntl_getlease(struct file *filp)
+
+ ctx = smp_load_acquire(&inode->i_flctx);
+ if (ctx && !list_empty_careful(&ctx->flc_lease)) {
+- percpu_down_read_preempt_disable(&file_rwsem);
++ percpu_down_read(&file_rwsem);
+ spin_lock(&ctx->flc_lock);
+ time_out_leases(inode, &dispose);
+ list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
+@@ -1619,7 +1619,7 @@ int fcntl_getlease(struct file *filp)
+ break;
+ }
+ spin_unlock(&ctx->flc_lock);
+- percpu_up_read_preempt_enable(&file_rwsem);
++ percpu_up_read(&file_rwsem);
+
+ locks_dispose_list(&dispose);
+ }
+@@ -1694,7 +1694,7 @@ generic_add_lease(struct file *filp, lon
+ return -EINVAL;
+ }
+
+- percpu_down_read_preempt_disable(&file_rwsem);
++ percpu_down_read(&file_rwsem);
+ spin_lock(&ctx->flc_lock);
+ time_out_leases(inode, &dispose);
+ error = check_conflicting_open(dentry, arg, lease->fl_flags);
+@@ -1765,7 +1765,7 @@ generic_add_lease(struct file *filp, lon
+ lease->fl_lmops->lm_setup(lease, priv);
+ out:
+ spin_unlock(&ctx->flc_lock);
+- percpu_up_read_preempt_enable(&file_rwsem);
++ percpu_up_read(&file_rwsem);
+ locks_dispose_list(&dispose);
+ if (is_deleg)
+ inode_unlock(inode);
+@@ -1788,7 +1788,7 @@ static int generic_delete_lease(struct f
+ return error;
+ }
+
+- percpu_down_read_preempt_disable(&file_rwsem);
++ percpu_down_read(&file_rwsem);
+ spin_lock(&ctx->flc_lock);
+ list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
+ if (fl->fl_file == filp &&
+@@ -1801,7 +1801,7 @@ static int generic_delete_lease(struct f
+ if (victim)
+ error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
+ spin_unlock(&ctx->flc_lock);
+- percpu_up_read_preempt_enable(&file_rwsem);
++ percpu_up_read(&file_rwsem);
+ locks_dispose_list(&dispose);
+ return error;
+ }
+@@ -2532,13 +2532,13 @@ locks_remove_lease(struct file *filp, st
+ if (list_empty(&ctx->flc_lease))
+ return;
+
+- percpu_down_read_preempt_disable(&file_rwsem);
++ percpu_down_read(&file_rwsem);
+ spin_lock(&ctx->flc_lock);
+ list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
+ if (filp == fl->fl_file)
+ lease_modify(fl, F_UNLCK, &dispose);
+ spin_unlock(&ctx->flc_lock);
+- percpu_up_read_preempt_enable(&file_rwsem);
++ percpu_up_read(&file_rwsem);
+
+ locks_dispose_list(&dispose);
+ }
+--- a/include/linux/percpu-rwsem.h
++++ b/include/linux/percpu-rwsem.h
+@@ -28,7 +28,7 @@ static struct percpu_rw_semaphore name =
+ extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
+ extern void __percpu_up_read(struct percpu_rw_semaphore *);
+
+-static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
++static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+ {
+ might_sleep();
+
+@@ -46,16 +46,10 @@ static inline void percpu_down_read_pree
+ __this_cpu_inc(*sem->read_count);
+ if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+ __percpu_down_read(sem, false); /* Unconditional memory barrier */
+- barrier();
+ /*
+- * The barrier() prevents the compiler from
++ * The preempt_enable() prevents the compiler from
+ * bleeding the critical section out.
+ */
+-}
+-
+-static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+-{
+- percpu_down_read_preempt_disable(sem);
+ preempt_enable();
+ }
+
+@@ -82,13 +76,9 @@ static inline int percpu_down_read_trylo
+ return ret;
+ }
+
+-static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
++static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+ {
+- /*
+- * The barrier() prevents the compiler from
+- * bleeding the critical section out.
+- */
+- barrier();
++ preempt_disable();
+ /*
+ * Same as in percpu_down_read().
+ */
+@@ -101,12 +91,6 @@ static inline void percpu_up_read_preemp
+ rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
+ }
+
+-static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+-{
+- preempt_disable();
+- percpu_up_read_preempt_enable(sem);
+-}
+-
+ extern void percpu_down_write(struct percpu_rw_semaphore *);
+ extern void percpu_up_write(struct percpu_rw_semaphore *);
+
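
For reference, the reader-side fast paths that result from this patch reduce to the sketch below. It is reconstructed from the hunks above plus the surrounding 4.9 include/linux/percpu-rwsem.h context; the lines the diff does not show (rwsem_acquire_read(), the __this_cpu_dec() branch) are assumptions based on that context. The key point: preemption is now disabled only across the per-CPU counter update, not across the caller's whole critical section:

	static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
	{
		might_sleep();
		rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);

		/* Pin the CPU only while touching the per-cpu count. */
		preempt_disable();
		__this_cpu_inc(*sem->read_count);
		if (unlikely(!rcu_sync_is_idle(&sem->rss)))
			__percpu_down_read(sem, false); /* Unconditional memory barrier */
		/*
		 * The preempt_enable() prevents the compiler from
		 * bleeding the critical section out.
		 */
		preempt_enable();
	}

	static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
	{
		preempt_disable();
		/* Same as in percpu_down_read(). */
		if (likely(rcu_sync_is_idle(&sem->rss)))
			__this_cpu_dec(*sem->read_count);
		else
			__percpu_up_read(sem); /* Unconditional memory barrier */
		preempt_enable();

		rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
	}
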
diff --git a/patches/series b/patches/series
index 42144376442177..dea119b46e38f9 100644
--- a/patches/series
+++ b/patches/series
@@ -352,6 +352,7 @@ genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
hrtimer-Move-schedule_work-call-to-helper-thread.patch
# FS
+peterz-percpu-rwsem-rt.patch
fs-namespace-preemption-fix.patch
mm-protect-activate-switch-mm.patch
fs-block-rt-support.patch