From 708dbc824657476807e387253001204df09c25ee Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Tue, 16 Mar 2010 14:31:44 -0700
Subject: [PATCH] sched: Break out from load_balancing on rq_lock contention

commit 5d2740b70e7f6ad29104aec72956fb6e4d143809 in tip.

[PG: account for sched --> sched_fair code moves and that
 the lock break in move_tasks is now upstream as baa8c110]

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
---
 kernel/sched.c      |    4 ++++
 kernel/sched_fair.c |   18 ++++++++++++++++++
 2 files changed, 22 insertions(+), 0 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 46869d9..e94915f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -824,7 +824,11 @@ late_initcall(sched_init_debug);
  * Number of tasks to iterate in a single balance run.
  * Limited because this is done with IRQs disabled.
  */
+#ifndef CONFIG_PREEMPT
 const_debug unsigned int sysctl_sched_nr_migrate = 32;
+#else
+const_debug unsigned int sysctl_sched_nr_migrate = 8;
+#endif
 
 /*
  * ratelimit for updating the group shares.
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index add5302..f776fff 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1954,6 +1954,10 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		 */
 		if (idle == CPU_NEWLY_IDLE)
 			break;
+
+		if (raw_spin_is_contended(&this_rq->lock) ||
+		    raw_spin_is_contended(&busiest->lock))
+			break;
 #endif
 
 		/*
@@ -2022,6 +2026,20 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		rem_load_move -= moved_load;
 		if (rem_load_move < 0)
 			break;
+
+#ifdef CONFIG_PREEMPT
+		/*
+		 * NEWIDLE balancing is a source of latency, so preemptible
+		 * kernels will stop after the first task is pulled to minimize
+		 * the critical section.
+		 */
+		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
+			break;
+
+		if (raw_spin_is_contended(&this_rq->lock) ||
+				raw_spin_is_contended(&busiest->lock))
+			break;
+#endif
 	}
 	rcu_read_unlock();
 
-- 
1.7.0.4
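
Appended note, not part of the patch itself: the hunks above bound two things -- the number of tasks moved per balance run (sysctl_sched_nr_migrate drops from 32 to 8 under CONFIG_PREEMPT) and the time both runqueue locks are held, by breaking out of the migration loop as soon as either lock is contended. Below is a minimal sketch of that break-out pattern, assuming scheduler-internal context (struct rq is private to kernel/sched.c); pull_tasks_bounded() and migrate_one_task() are hypothetical names used only for illustration, while raw_spin_is_contended() is the real helper the patch relies on.

	/*
	 * Illustrative sketch only -- not from the patch.  Caller is assumed
	 * to hold this_rq->lock and busiest->lock with IRQs disabled, as the
	 * load-balancing paths do.
	 */
	static int pull_tasks_bounded(struct rq *this_rq, struct rq *busiest,
				      unsigned int nr_migrate)
	{
		int pulled = 0;

		while (pulled < nr_migrate) {
			if (!migrate_one_task(this_rq, busiest))	/* hypothetical helper */
				break;
			pulled++;

			/*
			 * If another CPU is spinning on either runqueue lock,
			 * stop early so the time both locks (and IRQs) stay
			 * held is bounded -- the same check the hunks add to
			 * balance_tasks() and load_balance_fair().
			 */
			if (raw_spin_is_contended(&this_rq->lock) ||
			    raw_spin_is_contended(&busiest->lock))
				break;
		}

		return pulled;
	}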