From 45db8d4ffc7f7524d795dd92219ff1042188a959 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 21 Feb 2010 19:23:36 +0100
Subject: [PATCH] sched: Extend activate_task to allow queueing to the head of a list

commit babe95bad86cba3843cb53d1cee8ac39c491a64a in tip.

The ability to enqueue a task at the head of a SCHED_FIFO priority
list is required to fix violations of the POSIX scheduling policy.

Extend activate_task with a "head" argument and fix up all callers.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
---
 kernel/sched.c      |   17 +++++++++--------
 kernel/sched_fair.c |    2 +-
 kernel/sched_rt.c   |    4 ++--
 3 files changed, 12 insertions(+), 11 deletions(-)
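
[ Note, not part of the commit: a minimal sketch of how the new
  argument is intended to be used. requeue_task_head() is hypothetical;
  this patch itself only converts existing callers to head = false. ]

	/*
	 * Illustrative only. With the extended signature, a later fix
	 * for the POSIX SCHED_FIFO head-insertion requirement could
	 * requeue a task at the head of its priority list:
	 */
	static void requeue_task_head(struct rq *rq, struct task_struct *p)
	{
		deactivate_task(rq, p, 0);	/* take p off the runqueue */
		activate_task(rq, p, 0, true);	/* re-add it at the head */
	}

[ Every caller converted below passes head = false, preserving the
  old tail-queueing behaviour. ]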

diff --git a/kernel/sched.c b/kernel/sched.c
index 56ad49c..9a25a5f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1978,12 +1978,13 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 /*
  * activate_task - move a task to the runqueue.
  */
-static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+activate_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
-	enqueue_task(rq, p, wakeup, false);
+	enqueue_task(rq, p, wakeup, head);
 	inc_nr_running(rq);
 }
 
@@ -2533,7 +2534,7 @@ out_activate:
 		schedstat_inc(p, se.nr_wakeups_local);
 	else
 		schedstat_inc(p, se.nr_wakeups_remote);
-	activate_task(rq, p, 1);
+	activate_task(rq, p, 1, false);
 	success = 1;
 
 	/*
@@ -2800,7 +2801,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	BUG_ON(p->state != TASK_WAKING);
 	p->state = TASK_RUNNING;
 	update_rq_clock(rq);
-	activate_task(rq, p, 0);
+	activate_task(rq, p, 0, false);
 	trace_sched_wakeup_new(rq, p, 1);
 	check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -4877,7 +4878,7 @@ recheck:
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq) {
-		activate_task(rq, p, 0);
+		activate_task(rq, p, 0, false);
 
 		check_class_changed(rq, p, prev_class, oldprio, running);
 	}
@@ -5794,7 +5795,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	if (p->se.on_rq) {
 		deactivate_task(rq_src, p, 0);
 		set_task_cpu(p, dest_cpu);
-		activate_task(rq_dest, p, 0);
+		activate_task(rq_dest, p, 0, false);
 		check_preempt_curr(rq_dest, p, 0);
 	}
 done:
@@ -5962,7 +5963,7 @@ void sched_idle_next(void)
 	__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 
 	update_rq_clock(rq);
-	activate_task(rq, p, 0);
+	activate_task(rq, p, 0, false);
 
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
@@ -8268,7 +8269,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 		deactivate_task(rq, p, 0);
 	__setscheduler(rq, p, SCHED_NORMAL, 0);
 	if (on_rq) {
-		activate_task(rq, p, 0);
+		activate_task(rq, p, 0, false);
 		resched_task(rq->curr);
 	}
 }
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b873769..add5302 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1828,7 +1828,7 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
 {
 	deactivate_task(src_rq, p, 0);
 	set_task_cpu(p, this_cpu);
-	activate_task(this_rq, p, 0);
+	activate_task(this_rq, p, 0, false);
 	check_preempt_curr(this_rq, p, 0);
 }
 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index b2f6d2b..fdf667b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1429,7 +1429,7 @@ static int push_rt_task(struct rq *rq)
 
 	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
-	activate_task(lowest_rq, next_task, 0);
+	activate_task(lowest_rq, next_task, 0, false);
 
 	resched_task(lowest_rq->curr);
 
@@ -1512,7 +1512,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 			deactivate_task(src_rq, p, 0);
 			set_task_cpu(p, this_cpu);
-			activate_task(this_rq, p, 0);
+			activate_task(this_rq, p, 0, false);
 			/*
 			 * We continue with the search, just in
 			 * case there's an even higher prio task
-- 
1.7.0.4