author     Gerald Schaefer <gerald.schaefer@de.ibm.com>  2014-03-26 12:37:47 +0530
committer  Eli Qiao <taget@linux.vnet.ibm.com>           2014-03-31 13:50:57 +0800
commit     19da3e46931fa15713c0503720f567c2b8d8ff2b (patch)
tree       b592ed65eb1dff582e677f737ff2a93cb5f2121e
parent     f2043fc9c0847bf8c70892c48c5b70b2b6743a67 (diff)
download   powerkvm-19da3e46931fa15713c0503720f567c2b8d8ff2b.tar.gz
sched/autogroup: Fix race with task_groups list
In autogroup_create(), a tg is allocated and added to the task_groups
list. If CONFIG_RT_GROUP_SCHED is set, this tg is then modified while on
the list, without locking. This can race with someone walking the list,
like __enable_runtime() during CPU unplug, and result in a use-after-free
bug.

To fix this, move sched_online_group(), which adds the tg to the list,
to the end of the autogroup_create() function after the modification.

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1369411669-46971-2-git-send-email-gerald.schaefer@de.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
(cherry picked from commit 41261b6a832ea0e788627f6a8707854423f9ff49)
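Editor's note: the ordering matters because adding the tg to task_groups is what makes it visible to concurrent walkers. Below is a minimal userspace sketch of the same publish-after-initialization discipline; it is not the kernel code, and the names (struct tg, task_groups as a plain linked list, rt_ready, walk_list(), create_tg()) are invented for illustration. The walker stands in for __enable_runtime() iterating task_groups; create_tg() follows the fixed ordering, completing all modification of the node before publishing it on the shared list.

/*
 * Illustrative sketch only -- not the kernel code. It shows the
 * publish-after-initialization ordering the patch enforces: a list
 * walker must never see a node that is still being set up.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct tg {
	int rt_ready;		/* stands in for the RT fields rewired under CONFIG_RT_GROUP_SCHED */
	struct tg *next;
};

static struct tg *task_groups;	/* shared list head */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Walker: repeatedly scans the list and checks every node is fully set up. */
static void *walk_list(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&list_lock);
		for (struct tg *tg = task_groups; tg; tg = tg->next) {
			if (!tg->rt_ready) {
				fprintf(stderr, "walker saw a half-initialized tg\n");
				abort();
			}
		}
		pthread_mutex_unlock(&list_lock);
	}
	return NULL;
}

/*
 * Fixed ordering, mirroring the patch: finish every modification of the
 * new node first, then publish it on the shared list (the analogue of
 * calling sched_online_group() last in autogroup_create()).
 */
static void create_tg(void)
{
	struct tg *tg = calloc(1, sizeof(*tg));

	if (!tg)
		return;
	tg->rt_ready = 1;		/* complete initialization... */

	pthread_mutex_lock(&list_lock);	/* ...before making tg reachable */
	tg->next = task_groups;
	task_groups = tg;
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	pthread_t walker;

	pthread_create(&walker, NULL, walk_list, NULL);
	for (int i = 0; i < 1000; i++)
		create_tg();
	pthread_join(walker, NULL);
	puts("no half-initialized tg observed");
	return 0;
}

In the kernel case the consequence is worse than seeing stale fields: the RT-group modification frees structures a concurrent walker may still dereference, hence the use-after-free described above. The fix is the same ordering discipline, applied by moving the publish step to the end.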
-rw-r--r--	kernel/sched/auto_group.c	3
1 file changed, 1 insertion, 2 deletions
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index 64de5f8b0c9ed..4a073539c58e6 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -77,8 +77,6 @@ static inline struct autogroup *autogroup_create(void)
 	if (IS_ERR(tg))
 		goto out_free;
 
-	sched_online_group(tg, &root_task_group);
-
 	kref_init(&ag->kref);
 	init_rwsem(&ag->lock);
 	ag->id = atomic_inc_return(&autogroup_seq_nr);
@@ -98,6 +96,7 @@ static inline struct autogroup *autogroup_create(void)
 #endif
 	tg->autogroup = ag;
 
+	sched_online_group(tg, &root_task_group);
 	return ag;
 
 out_free: