author    Paul Gortmaker <paul.gortmaker@windriver.com>    2011-01-30 12:41:05 -0500
committer Paul Gortmaker <paul.gortmaker@windriver.com>    2011-01-30 12:41:05 -0500
commit    e416b66145c65a93640aa8f1f53dfcfb1288cdb9 (patch)
tree      ac5d0fe35755f8831996d9de47e933aa6b810a2e
parent    c78267e83e90c66730597848562ed63323be2970 (diff)
download  rt-patches-e416b66145c65a93640aa8f1f53dfcfb1288cdb9.tar.gz
more sched --> sched_fair fallout, plus drop a hunk that is now upstream (baa8c110)
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
-rw-r--r--    sched-Break-out-from-load_balancing-on-rq_lock-conte.patch    37
1 file changed, 15 insertions, 22 deletions
diff --git a/sched-Break-out-from-load_balancing-on-rq_lock-conte.patch b/sched-Break-out-from-load_balancing-on-rq_lock-conte.patch
index e0df4e3..bb8fedd 100644
--- a/sched-Break-out-from-load_balancing-on-rq_lock-conte.patch
+++ b/sched-Break-out-from-load_balancing-on-rq_lock-conte.patch
@@ -1,18 +1,22 @@
-From 35fd1cf795e3e3a3132a7dc05b54abe86b0b1988 Mon Sep 17 00:00:00 2001
+From 3d4ccca8d248e7d89507266a9aad7e005b19daef Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Tue, 16 Mar 2010 14:31:44 -0700
Subject: [PATCH] sched: Break out from load_balancing on rq_lock contention
commit 5d2740b70e7f6ad29104aec72956fb6e4d143809 in tip.
+[PG: account for sched --> sched_fair code moves and that
+ the lock break in move_tasks is now upstream as baa8c110]
+
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
diff --git a/kernel/sched.c b/kernel/sched.c
-index 4c581d4..2770368 100644
+index 429d251..a196eb8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
-@@ -851,7 +851,11 @@ late_initcall(sched_init_debug);
+@@ -813,7 +813,11 @@ late_initcall(sched_init_debug);
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
@@ -24,33 +28,22 @@ index 4c581d4..2770368 100644
/*
* ratelimit for updating the group shares.
-@@ -3490,6 +3494,10 @@ next:
- */
- if (idle == CPU_NEWLY_IDLE)
- goto out;
-+
-+ if (raw_spin_is_contended(&this_rq->lock) ||
-+ raw_spin_is_contended(&busiest->lock))
-+ goto out;
- #endif
-
- /*
-@@ -3546,6 +3554,10 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index 4c47b93..07132f0 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -1954,6 +1954,10 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
*/
- if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
+ if (idle == CPU_NEWLY_IDLE)
break;
+
+ if (raw_spin_is_contended(&this_rq->lock) ||
+ raw_spin_is_contended(&busiest->lock))
+ break;
#endif
- } while (class && max_load_move > total_load_moved);
-diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
-index cff45e4..5240469 100644
---- a/kernel/sched_fair.c
-+++ b/kernel/sched_fair.c
-@@ -1915,6 +1915,20 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
+ /*
+@@ -2022,6 +2026,20 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
rem_load_move -= moved_load;
if (rem_load_move < 0)
break;
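
For context, the pattern the refreshed patch carries into balance_tasks() is a
lock break: while migrating tasks with both runqueue locks held, bail out as
soon as raw_spin_is_contended() reports a waiter on either lock, instead of
making that waiter spin for the whole balance run. Below is a minimal
userspace sketch of the same idea, assuming a ticket lock so the holder can
see queued waiters; the tlock_* helpers, balance_one() and the iteration
count are hypothetical stand-ins, not kernel code.

/* Sketch: break out of a long critical section when the lock is contended. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct tlock {
	atomic_uint next;	/* next ticket to hand out */
	atomic_uint owner;	/* ticket currently being served */
};

static void tlock_lock(struct tlock *l)
{
	unsigned int me = atomic_fetch_add(&l->next, 1);

	while (atomic_load(&l->owner) != me)
		;	/* spin until our ticket comes up */
}

static void tlock_unlock(struct tlock *l)
{
	atomic_fetch_add(&l->owner, 1);
}

/* True when tickets have been handed out beyond the one being served,
 * i.e. at least one other thread is spinning on the lock. */
static bool tlock_is_contended(struct tlock *l)
{
	return atomic_load(&l->next) - atomic_load(&l->owner) > 1;
}

static struct tlock rq_lock = { 0, 0 };

/* Hypothetical stand-in for migrating one task. */
static int balance_one(void) { return 1; }

static void *waiter(void *arg)
{
	(void)arg;
	tlock_lock(&rq_lock);	/* queues a ticket -> holder sees contention */
	tlock_unlock(&rq_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	long moved = 0;

	tlock_lock(&rq_lock);
	pthread_create(&t, NULL, waiter, NULL);
	for (int i = 0; i < 100000000; i++) {
		moved += balance_one();
		/* Mirror of the patch: stop early rather than make the
		 * waiter spin for the entire balance run. */
		if (tlock_is_contended(&rq_lock)) {
			printf("contended after %ld iterations, bailing\n",
			       moved);
			break;
		}
	}
	tlock_unlock(&rq_lock);
	pthread_join(&t, NULL);
	return 0;
}

The contention check is what raw_spin_is_contended() amounts to on the ticket
spinlocks of this era: compare the next and owner fields and report whether
anyone is queued behind the current holder.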