From 3596f1f405fd4f6edca51cdf44bb639191b7bb99 Mon Sep 17 00:00:00 2001
From: Gregory Haskins
Date: Fri, 3 Jul 2009 08:44:21 -0500
Subject: [PATCH] sched: make task->oncpu available in all configurations

commit 529d35d4c98a136eb811607fe418df5781395db6 in tip.

We will use this later in the series to eliminate the need for a
function call.

[ Steven Rostedt: added task_is_current function ]

Signed-off-by: Gregory Haskins
Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
Signed-off-by: Paul Gortmaker
---
 include/linux/sched.h |   11 +++++++----
 kernel/sched.c        |   37 ++++++++++++++++++++++++++-----------
 2 files changed, 33 insertions(+), 15 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5dfd465..898f7f1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1225,10 +1225,8 @@ struct task_struct {
 	int lock_depth;		/* BKL lock depth */
 
 #ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	int oncpu;
 #endif
-#endif
 
 	int prio, static_prio, normal_prio;
 	unsigned int rt_priority;
@@ -2023,6 +2021,13 @@ extern void set_curr_task(int cpu, struct task_struct *p);
 void yield(void);
 void __yield(void);
 
+#ifdef CONFIG_SMP
+static inline int task_is_current(struct task_struct *task)
+{
+	return task->oncpu;
+}
+#endif
+
 /*
  * The default (Linux) execution domain.
  */
@@ -2675,8 +2680,6 @@ static inline unsigned long rlimit_max(unsigned int limit)
 	return task_rlimit_max(current, limit);
 }
 
-extern int task_is_current(struct task_struct *task);
-
 #endif /* __KERNEL__ */
 
 #endif
diff --git a/kernel/sched.c b/kernel/sched.c
index 90b4b39..a194f62 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -670,10 +670,12 @@ inline void update_rq_clock(struct rq *rq)
 		rq->clock = sched_clock_cpu(cpu_of(rq));
 }
 
+#ifndef CONFIG_SMP
 int task_is_current(struct task_struct *task)
 {
 	return task_rq(task)->curr == task;
 }
+#endif
 
 /*
  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
@@ -891,18 +893,39 @@ static inline int task_current(struct rq *rq, struct task_struct *p)
 	return rq->curr == p;
 }
 
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline int task_running(struct rq *rq, struct task_struct *p)
 {
+#ifdef CONFIG_SMP
+	return p->oncpu;
+#else
 	return task_current(rq, p);
+#endif
 }
 
+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * We can optimise this out completely for !SMP, because the
+	 * SMP rebalancing from interrupt is the only thing that cares
+	 * here.
+	 */
+	next->oncpu = 1;
+#endif
 }
 
 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * After ->oncpu is cleared, the task can be moved to a different CPU.
+	 * We must ensure this doesn't happen until the switch is completely
+	 * finished.
+	 */
+	smp_wmb();
+	prev->oncpu = 0;
+#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
 	rq->lock.owner = current;
@@ -918,14 +941,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(struct rq *rq, struct task_struct *p)
-{
-#ifdef CONFIG_SMP
-	return p->oncpu;
-#else
-	return task_current(rq, p);
-#endif
-}
 
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
@@ -2731,7 +2746,7 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (likely(sched_info_on()))
 		memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
 	p->oncpu = 0;
 #endif
 #ifdef CONFIG_PREEMPT
@@ -5563,7 +5578,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	__set_task_cpu(idle, cpu);
 
 	rq->curr = rq->idle = idle;
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
 	idle->oncpu = 1;
 #endif
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
-- 
1.7.0.4