author    Ingo Molnar <mingo@kernel.org>    2014-09-09 06:54:17 +0200
committer Ingo Molnar <mingo@kernel.org>    2014-09-09 06:54:17 +0200
commit    f3d347222f9901431a10b6aa373967bfc8eee6ba (patch)
tree      44e4b7257fd638b257b62e26f562dc1fc65398b5
parent    92d03f953cec933db010b1cf44ea3a745cc9ab1f (diff)
parent    8236d907ab3411ad452280faa8b26c1347327380 (diff)
Merge branch 'sched/core'
-rw-r--r--  kernel/sched/core.c  8
-rw-r--r--  kernel/sched/fair.c  3
2 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 53985a609963a..07d67dd7e4d78 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4650,7 +4650,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-	if (task_on_rq_queued(p)) {
+	if (task_on_rq_queued(p) || p->state == TASK_WAKING) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, p, &flags);
@@ -4783,6 +4783,12 @@ static int migration_cpu_stop(void *data)
 	 * be on another cpu but it doesn't matter.
 	 */
 	local_irq_disable();
+	/*
+	 * We need to explicitly wake pending tasks before running
+	 * __migrate_task() such that we will not miss enforcing cpus_allowed
+	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
+	 */
+	sched_ttwu_pending();
 	__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
 	local_irq_enable();
 	return 0;
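
Taken together, the two core.c hunks close a race: a task can still be in TASK_WAKING on a remote CPU's wake list while set_cpus_allowed_ptr() changes its affinity, so the migration stopper now flushes pending wake-ups via sched_ttwu_pending() before __migrate_task() runs. Below is a minimal user-space C sketch of that drain-before-migrate ordering, not kernel code; every name in it (pending_head, drain_pending, migrate_task) is a hypothetical stand-in for the kernel machinery referenced in the diff.

#include <stdio.h>

struct task {
	const char *name;
	int cpu;
	struct task *next;
};

/* Hypothetical stand-in for the per-CPU wake list that
 * sched_ttwu_pending() drains. */
static struct task *pending_head;

/* Complete any deferred wake-ups; stands in for sched_ttwu_pending(). */
static void drain_pending(int this_cpu)
{
	while (pending_head) {
		struct task *t = pending_head;

		pending_head = t->next;
		t->cpu = this_cpu;
		printf("wake-up of %s completed on cpu %d\n", t->name, this_cpu);
	}
}

/* Stands in for __migrate_task(). */
static void migrate_task(struct task *t, int dest_cpu)
{
	t->cpu = dest_cpu;
	printf("%s migrated to cpu %d\n", t->name, dest_cpu);
}

int main(void)
{
	struct task t = { "worker", -1, NULL };

	/* t is mid-wakeup (the TASK_WAKING case): queued, not yet placed. */
	pending_head = &t;

	/*
	 * Mirrors migration_cpu_stop() after the patch: drain first, then
	 * migrate, so the deferred wake-up cannot place the task on a CPU
	 * that its new cpus_allowed mask forbids after the fact.
	 */
	drain_pending(0);
	migrate_task(&t, 2);
	return 0;
}

If migrate_task() ran before drain_pending(), the deferred wake-up would overwrite the destination CPU, which is exactly the window the TASK_WAKING test plus the sched_ttwu_pending() call removes.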
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 50d2025c1777b..be9e97b0d76f6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2382,6 +2382,9 @@ static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
 	tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
 	tg_contrib -= cfs_rq->tg_load_contrib;
 
+	if (!tg_contrib)
+		return;
+
 	if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
 		atomic_long_add(tg_contrib, &tg->load_avg);
 		cfs_rq->tg_load_contrib += tg_contrib;
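
The fair.c hunk is a contention fix: when the group's load contribution has not changed, the function now returns before touching the shared tg->load_avg atomic at all, sparing a cache-line bounce. A hedged user-space C11 sketch of the same skip-the-atomic-when-the-delta-is-zero pattern, keeping the 1/8 hysteresis from the diff but ignoring force_update for brevity; the names total_load and update_contrib are hypothetical:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Stands in for tg->load_avg: shared state that every CPU updates. */
static atomic_long total_load;

static void update_contrib(long *contrib, long runnable, long blocked)
{
	long delta = (runnable + blocked) - *contrib;

	/* The new early return: no change means no atomic RMW and no
	 * cache-line traffic on total_load. */
	if (!delta)
		return;

	/* Same 1/8 hysteresis as __update_cfs_rq_tg_load_contrib(). */
	if (labs(delta) > *contrib / 8) {
		atomic_fetch_add(&total_load, delta);
		*contrib += delta;
	}
}

int main(void)
{
	long contrib = 0;

	update_contrib(&contrib, 100, 20);  /* publishes a delta of 120 */
	update_contrib(&contrib, 100, 20);  /* delta == 0: skipped */
	printf("total_load = %ld\n", atomic_load(&total_load));
	return 0;
}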