author    Martin Schwidefsky <schwidefsky@de.ibm.com>  2005-01-11 01:40:38 -0800
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-01-11 01:40:38 -0800
commit    0a71336b6a8858a525007c5b4e0d14ba57f9f315 (patch)
tree      12bf4f03dc9747a5cc401bc53f27fcdee13a7ef8 /kernel
parent    0fada656bc0fbf5403cb7e629a836bfb77e7c096 (diff)
[PATCH] cputime: introduce cputime
This patch introduces the concept of (virtual) cputime. Each architecture can define its own method to measure cputime. The main idea is to define a cputime_t type and a set of operations on it (see asm-generic/cputime.h; a trimmed sketch follows below), then use that type for utime, stime, cutime, cstime, it_virt_value, it_virt_incr, it_prof_value and it_prof_incr, and use the cputime operations for every access to these variables. The default implementation is jiffies-based, so the effect of this patch on architectures that use the default implementation should be negligible.

There is a second type, cputime64_t, which is needed for the kernel_stat cpu statistics. The default cputime_t is 32 bits and based on HZ; at HZ=1000 it overflows after 49.7 days. That is not enough for kernel_stat (imho not enough for a process either), so a 64-bit type is necessary.

The third thing this patch introduces is an additional field in the /proc/stat interface: cpu steal time. An architecture can account cpu steal time by calling the account_steal_time function. The cpu that backs a virtual processor doesn't spend all of its time on that virtual cpu; to get meaningful cpu usage numbers, this involuntary wait time needs to be accounted and exported to user space.

From: Hugh Dickins <hugh@veritas.com>

The p->signal check in account_system_time is insufficient. If the timer interrupt hits near the end of exit_notify, after EXIT_ZOMBIE has been set, another cpu may release_task (NULLifying p->signal) in between account_system_time's check and check_rlimit's dereference. Nor should account_it_prof risk send_sig. But surely account_user_time is safe?

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
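For reference, the jiffies-based default referred to above looks roughly like this. It is a paraphrased sketch of asm-generic/cputime.h as introduced by this patch, trimmed to the operations used in the hunks below, not a verbatim copy of the header.

/*
 * Sketch of the default (jiffies-based) cputime implementation.
 * cputime_t is an unsigned long holding jiffies, so every operation
 * compiles down to plain integer arithmetic.
 */
typedef unsigned long cputime_t;

#define cputime_zero			(0UL)
#define cputime_add(__a, __b)		((__a) + (__b))
#define cputime_sub(__a, __b)		((__a) - (__b))
#define cputime_eq(__a, __b)		((__a) == (__b))
#define cputime_gt(__a, __b)		((__a) >  (__b))
#define cputime_ge(__a, __b)		((__a) >= (__b))

/* Conversions are identity mappings in the default implementation. */
#define cputime_to_jiffies(__ct)	(__ct)
#define jiffies_to_cputime(__hz)	(__hz)
#define cputime_to_secs(__ct)		((__ct) / HZ)

/* 64-bit variant for kernel_stat; a 32-bit HZ-based counter wraps
 * after 49.7 days at HZ=1000, too short for per-cpu statistics. */
typedef u64 cputime64_t;
#define cputime64_add(__a, __b)		((__a) + (__b))
#define cputime_to_cputime64(__ct)	((u64)(__ct))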
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/compat.c   14
-rw-r--r--  kernel/cpu.c       4
-rw-r--r--  kernel/exit.c     18
-rw-r--r--  kernel/fork.c     14
-rw-r--r--  kernel/itimer.c   57
-rw-r--r--  kernel/sched.c   168
-rw-r--r--  kernel/signal.c   14
-rw-r--r--  kernel/sys.c      34
-rw-r--r--  kernel/timer.c    65
9 files changed, 238 insertions(+), 150 deletions(-)
diff --git a/kernel/compat.c b/kernel/compat.c
index 48ee147c134357..d1b1d4dd019a5a 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -163,15 +163,15 @@ asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
struct compat_tms tmp;
struct task_struct *tsk = current;
struct task_struct *t;
- unsigned long utime, stime, cutime, cstime;
+ cputime_t utime, stime, cutime, cstime;
read_lock(&tasklist_lock);
utime = tsk->signal->utime;
stime = tsk->signal->stime;
t = tsk;
do {
- utime += t->utime;
- stime += t->stime;
+ utime = cputime_add(utime, t->utime);
+ stime = cputime_add(stime, t->stime);
t = next_thread(t);
} while (t != tsk);
@@ -190,10 +190,10 @@ asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
spin_unlock_irq(&tsk->sighand->siglock);
read_unlock(&tasklist_lock);
- tmp.tms_utime = compat_jiffies_to_clock_t(utime);
- tmp.tms_stime = compat_jiffies_to_clock_t(stime);
- tmp.tms_cutime = compat_jiffies_to_clock_t(cutime);
- tmp.tms_cstime = compat_jiffies_to_clock_t(cstime);
+ tmp.tms_utime = compat_jiffies_to_clock_t(cputime_to_jiffies(utime));
+ tmp.tms_stime = compat_jiffies_to_clock_t(cputime_to_jiffies(stime));
+ tmp.tms_cutime = compat_jiffies_to_clock_t(cputime_to_jiffies(cutime));
+ tmp.tms_cstime = compat_jiffies_to_clock_t(cputime_to_jiffies(cstime));
if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
return -EFAULT;
}
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b97f7f91ec6dcc..628f4ccda12790 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -48,7 +48,9 @@ static inline void check_for_tasks(int cpu)
write_lock_irq(&tasklist_lock);
for_each_process(p) {
- if (task_cpu(p) == cpu && (p->utime != 0 || p->stime != 0))
+ if (task_cpu(p) == cpu &&
+ (!cputime_eq(p->utime, cputime_zero) ||
+ !cputime_eq(p->stime, cputime_zero)))
printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
(state = %ld, flags = %lx) \n",
p->comm, p->pid, cpu, p->state, p->flags);
diff --git a/kernel/exit.c b/kernel/exit.c
index 02f0a95cb55705..e0df301a455361 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -755,8 +755,8 @@ static void exit_notify(struct task_struct *tsk)
* Clear these here so that update_process_times() won't try to deliver
* itimer, profile or rlimit signals to this task while it is in late exit.
*/
- tsk->it_virt_value = 0;
- tsk->it_prof_value = 0;
+ tsk->it_virt_value = cputime_zero;
+ tsk->it_prof_value = cputime_zero;
write_unlock_irq(&tasklist_lock);
@@ -1046,10 +1046,16 @@ static int wait_task_zombie(task_t *p, int noreap,
* here reaping other children at the same time.
*/
spin_lock_irq(&p->parent->sighand->siglock);
- p->parent->signal->cutime +=
- p->utime + p->signal->utime + p->signal->cutime;
- p->parent->signal->cstime +=
- p->stime + p->signal->stime + p->signal->cstime;
+ p->parent->signal->cutime =
+ cputime_add(p->parent->signal->cutime,
+ cputime_add(p->utime,
+ cputime_add(p->signal->utime,
+ p->signal->cutime)));
+ p->parent->signal->cstime =
+ cputime_add(p->parent->signal->cstime,
+ cputime_add(p->stime,
+ cputime_add(p->signal->stime,
+ p->signal->cstime)));
p->parent->signal->cmin_flt +=
p->min_flt + p->signal->min_flt + p->signal->cmin_flt;
p->parent->signal->cmaj_flt +=
diff --git a/kernel/fork.c b/kernel/fork.c
index 6d9412937d3729..be1ff8ddbb9c01 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -749,7 +749,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
sig->leader = 0; /* session leadership doesn't inherit */
sig->tty_old_pgrp = 0;
- sig->utime = sig->stime = sig->cutime = sig->cstime = 0;
+ sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
@@ -871,15 +871,15 @@ static task_t *copy_process(unsigned long clone_flags,
p->it_real_value = 0;
p->it_real_incr = 0;
- p->it_virt_value = 0;
- p->it_virt_incr = 0;
- p->it_prof_value = 0;
- p->it_prof_incr = 0;
+ p->it_virt_value = cputime_zero;
+ p->it_virt_incr = cputime_zero;
+ p->it_prof_value = cputime_zero;
+ p->it_prof_incr = cputime_zero;
init_timer(&p->real_timer);
p->real_timer.data = (unsigned long) p;
- p->utime = 0;
- p->stime = 0;
+ p->utime = cputime_zero;
+ p->stime = cputime_zero;
p->rchar = 0; /* I/O counter: bytes read */
p->wchar = 0; /* I/O counter: bytes written */
p->syscr = 0; /* I/O counter: read syscalls */
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 95fbf1c6becf9f..e1743c56320621 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -16,11 +16,10 @@
int do_getitimer(int which, struct itimerval *value)
{
- register unsigned long val, interval;
+ register unsigned long val;
switch (which) {
case ITIMER_REAL:
- interval = current->it_real_incr;
val = 0;
/*
* FIXME! This needs to be atomic, in case the kernel timer happens!
@@ -32,20 +31,20 @@ int do_getitimer(int which, struct itimerval *value)
if ((long) val <= 0)
val = 1;
}
+ jiffies_to_timeval(val, &value->it_value);
+ jiffies_to_timeval(current->it_real_incr, &value->it_interval);
break;
case ITIMER_VIRTUAL:
- val = current->it_virt_value;
- interval = current->it_virt_incr;
+ cputime_to_timeval(current->it_virt_value, &value->it_value);
+ cputime_to_timeval(current->it_virt_incr, &value->it_interval);
break;
case ITIMER_PROF:
- val = current->it_prof_value;
- interval = current->it_prof_incr;
+ cputime_to_timeval(current->it_prof_value, &value->it_value);
+ cputime_to_timeval(current->it_prof_incr, &value->it_interval);
break;
default:
return(-EINVAL);
}
- jiffies_to_timeval(val, &value->it_value);
- jiffies_to_timeval(interval, &value->it_interval);
return 0;
}
@@ -81,37 +80,43 @@ void it_real_fn(unsigned long __data)
int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
{
- register unsigned long i, j;
+ unsigned long expire;
+ cputime_t cputime;
int k;
- i = timeval_to_jiffies(&value->it_interval);
- j = timeval_to_jiffies(&value->it_value);
if (ovalue && (k = do_getitimer(which, ovalue)) < 0)
return k;
switch (which) {
case ITIMER_REAL:
del_timer_sync(&current->real_timer);
- current->it_real_value = j;
- current->it_real_incr = i;
- if (!j)
+ expire = timeval_to_jiffies(&value->it_value);
+ current->it_real_value = expire;
+ current->it_real_incr =
+ timeval_to_jiffies(&value->it_interval);
+ if (!expire)
break;
- if (j > (unsigned long) LONG_MAX)
- j = LONG_MAX;
- i = j + jiffies;
- current->real_timer.expires = i;
+ if (expire > (unsigned long) LONG_MAX)
+ expire = LONG_MAX;
+ current->real_timer.expires = jiffies + expire;
add_timer(&current->real_timer);
break;
case ITIMER_VIRTUAL:
- if (j)
- j++;
- current->it_virt_value = j;
- current->it_virt_incr = i;
+ cputime = timeval_to_cputime(&value->it_value);
+ if (cputime_gt(cputime, cputime_zero))
+ cputime = cputime_add(cputime,
+ jiffies_to_cputime(1));
+ current->it_virt_value = cputime;
+ cputime = timeval_to_cputime(&value->it_interval);
+ current->it_virt_incr = cputime;
break;
case ITIMER_PROF:
- if (j)
- j++;
- current->it_prof_value = j;
- current->it_prof_incr = i;
+ cputime = timeval_to_cputime(&value->it_value);
+ if (cputime_gt(cputime, cputime_zero))
+ cputime = cputime_add(cputime,
+ jiffies_to_cputime(1));
+ current->it_prof_value = cputime;
+ cputime = timeval_to_cputime(&value->it_interval);
+ current->it_prof_incr = cputime;
break;
default:
return -EINVAL;
diff --git a/kernel/sched.c b/kernel/sched.c
index 1c523337bc617a..9e1fbc42bd0193 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1182,7 +1182,7 @@ void fastcall sched_fork(task_t *p)
*/
current->time_slice = 1;
preempt_disable();
- scheduler_tick(0, 0);
+ scheduler_tick();
local_irq_enable();
preempt_enable();
} else
@@ -2251,48 +2251,168 @@ EXPORT_PER_CPU_SYMBOL(kstat);
((rq)->curr->static_prio > (rq)->best_expired_prio))
/*
+ * Do the virtual cpu time signal calculations.
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in user space since the last update
+ */
+static inline void account_it_virt(struct task_struct * p, cputime_t cputime)
+{
+ cputime_t it_virt = p->it_virt_value;
+
+ if (cputime_gt(it_virt, cputime_zero) &&
+ cputime_gt(cputime, cputime_zero)) {
+ if (cputime_ge(cputime, it_virt)) {
+ it_virt = cputime_add(it_virt, p->it_virt_incr);
+ send_sig(SIGVTALRM, p, 1);
+ }
+ it_virt = cputime_sub(it_virt, cputime);
+ p->it_virt_value = it_virt;
+ }
+}
+
+/*
+ * Do the profiling signal calculations.
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in user and kernel space since the last update
+ */
+static void account_it_prof(struct task_struct *p, cputime_t cputime)
+{
+ cputime_t it_prof = p->it_prof_value;
+
+ if (cputime_gt(it_prof, cputime_zero) &&
+ cputime_gt(cputime, cputime_zero)) {
+ if (cputime_ge(cputime, it_prof)) {
+ it_prof = cputime_add(it_prof, p->it_prof_incr);
+ send_sig(SIGPROF, p, 1);
+ }
+ it_prof = cputime_sub(it_prof, cputime);
+ p->it_prof_value = it_prof;
+ }
+}
+
+/*
+ * Check if the process went over its cputime resource limit after
+ * some cpu time got added to utime/stime.
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in user and kernel space since the last update
+ */
+static void check_rlimit(struct task_struct *p, cputime_t cputime)
+{
+ cputime_t total, tmp;
+
+ total = cputime_add(p->utime, p->stime);
+ tmp = jiffies_to_cputime(p->signal->rlim[RLIMIT_CPU].rlim_cur);
+ if (unlikely(cputime_gt(total, tmp))) {
+ /* Send SIGXCPU every second. */
+ tmp = cputime_sub(total, cputime);
+ if (cputime_to_secs(tmp) < cputime_to_secs(total))
+ send_sig(SIGXCPU, p, 1);
+ /* and SIGKILL when we go over max.. */
+ tmp = jiffies_to_cputime(p->signal->rlim[RLIMIT_CPU].rlim_max);
+ if (cputime_gt(total, tmp))
+ send_sig(SIGKILL, p, 1);
+ }
+}
+
+/*
+ * Account user cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in user space since the last update
+ */
+void account_user_time(struct task_struct *p, cputime_t cputime)
+{
+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+ cputime64_t tmp;
+
+ p->utime = cputime_add(p->utime, cputime);
+
+ /* Check for signals (SIGVTALRM, SIGPROF, SIGXCPU & SIGKILL). */
+ check_rlimit(p, cputime);
+ account_it_virt(p, cputime);
+ account_it_prof(p, cputime);
+
+ /* Add user time to cpustat. */
+ tmp = cputime_to_cputime64(cputime);
+ if (TASK_NICE(p) > 0)
+ cpustat->nice = cputime64_add(cpustat->nice, tmp);
+ else
+ cpustat->user = cputime64_add(cpustat->user, tmp);
+}
+
+/*
+ * Account system cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @hardirq_offset: the offset to subtract from hardirq_count()
+ * @cputime: the cpu time spent in kernel space since the last update
+ */
+void account_system_time(struct task_struct *p, int hardirq_offset,
+ cputime_t cputime)
+{
+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+ runqueue_t *rq = this_rq();
+ cputime64_t tmp;
+
+ p->stime = cputime_add(p->stime, cputime);
+
+ /* Check for signals (SIGPROF, SIGXCPU & SIGKILL). */
+ if (likely(p->signal && p->exit_state < EXIT_ZOMBIE)) {
+ check_rlimit(p, cputime);
+ account_it_prof(p, cputime);
+ }
+
+ /* Add system time to cpustat. */
+ tmp = cputime_to_cputime64(cputime);
+ if (hardirq_count() - hardirq_offset)
+ cpustat->irq = cputime64_add(cpustat->irq, tmp);
+ else if (softirq_count())
+ cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
+ else if (p != rq->idle)
+ cpustat->system = cputime64_add(cpustat->system, tmp);
+ else if (atomic_read(&rq->nr_iowait) > 0)
+ cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
+ else
+ cpustat->idle = cputime64_add(cpustat->idle, tmp);
+}
+
+/*
+ * Account for involuntary wait time.
+ * @p: the process from which the cpu time has been stolen
+ * @steal: the cpu time spent in involuntary wait
+ */
+void account_steal_time(struct task_struct *p, cputime_t steal)
+{
+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+ cputime64_t steal64 = cputime_to_cputime64(steal);
+ runqueue_t *rq = this_rq();
+
+ if (p == rq->idle)
+ cpustat->system = cputime64_add(cpustat->system, steal64);
+ else
+ cpustat->steal = cputime64_add(cpustat->steal, steal64);
+}
+
+/*
* This function gets called by the timer code, with HZ frequency.
* We call it with interrupts disabled.
*
* It also gets called by the fork code, when changing the parent's
* timeslices.
*/
-void scheduler_tick(int user_ticks, int sys_ticks)
+void scheduler_tick(void)
{
int cpu = smp_processor_id();
- struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
runqueue_t *rq = this_rq();
task_t *p = current;
rq->timestamp_last_tick = sched_clock();
- if (rcu_pending(cpu))
- rcu_check_callbacks(cpu, user_ticks);
-
- /* note: this timer irq context must be accounted for as well */
- if (hardirq_count() - HARDIRQ_OFFSET) {
- cpustat->irq += sys_ticks;
- sys_ticks = 0;
- } else if (softirq_count()) {
- cpustat->softirq += sys_ticks;
- sys_ticks = 0;
- }
-
if (p == rq->idle) {
- if (atomic_read(&rq->nr_iowait) > 0)
- cpustat->iowait += sys_ticks;
- else
- cpustat->idle += sys_ticks;
if (wake_priority_sleeper(rq))
goto out;
rebalance_tick(cpu, rq, SCHED_IDLE);
return;
}
- if (TASK_NICE(p) > 0)
- cpustat->nice += user_ticks;
- else
- cpustat->user += user_ticks;
- cpustat->system += sys_ticks;
/* Task might have expired already, but not scheduled off yet */
if (p->array != rq->active) {
diff --git a/kernel/signal.c b/kernel/signal.c
index d800b3f97323f3..6d0a3bd948ab1a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -375,8 +375,8 @@ void __exit_signal(struct task_struct *tsk)
* We won't ever get here for the group leader, since it
* will have been the last reference on the signal_struct.
*/
- sig->utime += tsk->utime;
- sig->stime += tsk->stime;
+ sig->utime = cputime_add(sig->utime, tsk->utime);
+ sig->stime = cputime_add(sig->stime, tsk->stime);
sig->min_flt += tsk->min_flt;
sig->maj_flt += tsk->maj_flt;
sig->nvcsw += tsk->nvcsw;
@@ -1470,8 +1470,10 @@ void do_notify_parent(struct task_struct *tsk, int sig)
info.si_uid = tsk->uid;
/* FIXME: find out whether or not this is supposed to be c*time. */
- info.si_utime = tsk->utime + tsk->signal->utime;
- info.si_stime = tsk->stime + tsk->signal->stime;
+ info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
+ tsk->signal->utime));
+ info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
+ tsk->signal->stime));
info.si_status = tsk->exit_code & 0x7f;
if (tsk->exit_code & 0x80)
@@ -1527,8 +1529,8 @@ do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
info.si_uid = tsk->uid;
/* FIXME: find out whether or not this is supposed to be c*time. */
- info.si_utime = tsk->utime;
- info.si_stime = tsk->stime;
+ info.si_utime = cputime_to_jiffies(tsk->utime);
+ info.si_stime = cputime_to_jiffies(tsk->stime);
info.si_code = why;
switch (why) {
diff --git a/kernel/sys.c b/kernel/sys.c
index 20080da0c3defb..6e354fd380e7a1 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -893,15 +893,15 @@ asmlinkage long sys_times(struct tms __user * tbuf)
struct tms tmp;
struct task_struct *tsk = current;
struct task_struct *t;
- unsigned long utime, stime, cutime, cstime;
+ cputime_t utime, stime, cutime, cstime;
read_lock(&tasklist_lock);
utime = tsk->signal->utime;
stime = tsk->signal->stime;
t = tsk;
do {
- utime += t->utime;
- stime += t->stime;
+ utime = cputime_add(utime, t->utime);
+ stime = cputime_add(stime, t->stime);
t = next_thread(t);
} while (t != tsk);
@@ -920,10 +920,10 @@ asmlinkage long sys_times(struct tms __user * tbuf)
spin_unlock_irq(&tsk->sighand->siglock);
read_unlock(&tasklist_lock);
- tmp.tms_utime = jiffies_to_clock_t(utime);
- tmp.tms_stime = jiffies_to_clock_t(stime);
- tmp.tms_cutime = jiffies_to_clock_t(cutime);
- tmp.tms_cstime = jiffies_to_clock_t(cstime);
+ tmp.tms_utime = cputime_to_clock_t(utime);
+ tmp.tms_stime = cputime_to_clock_t(stime);
+ tmp.tms_cutime = cputime_to_clock_t(cutime);
+ tmp.tms_cstime = cputime_to_clock_t(cstime);
if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
return -EFAULT;
}
@@ -1528,7 +1528,7 @@ void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
struct task_struct *t;
unsigned long flags;
- unsigned long utime, stime;
+ cputime_t utime, stime;
memset((char *) r, 0, sizeof *r);
@@ -1545,12 +1545,12 @@ void k_getrusage(struct task_struct *p, int who, struct rusage *r)
r->ru_minflt = p->signal->cmin_flt;
r->ru_majflt = p->signal->cmaj_flt;
spin_unlock_irqrestore(&p->sighand->siglock, flags);
- jiffies_to_timeval(utime, &r->ru_utime);
- jiffies_to_timeval(stime, &r->ru_stime);
+ cputime_to_timeval(utime, &r->ru_utime);
+ cputime_to_timeval(stime, &r->ru_stime);
break;
case RUSAGE_SELF:
spin_lock_irqsave(&p->sighand->siglock, flags);
- utime = stime = 0;
+ utime = stime = cputime_zero;
goto sum_group;
case RUSAGE_BOTH:
spin_lock_irqsave(&p->sighand->siglock, flags);
@@ -1561,16 +1561,16 @@ void k_getrusage(struct task_struct *p, int who, struct rusage *r)
r->ru_minflt = p->signal->cmin_flt;
r->ru_majflt = p->signal->cmaj_flt;
sum_group:
- utime += p->signal->utime;
- stime += p->signal->stime;
+ utime = cputime_add(utime, p->signal->utime);
+ stime = cputime_add(stime, p->signal->stime);
r->ru_nvcsw += p->signal->nvcsw;
r->ru_nivcsw += p->signal->nivcsw;
r->ru_minflt += p->signal->min_flt;
r->ru_majflt += p->signal->maj_flt;
t = p;
do {
- utime += t->utime;
- stime += t->stime;
+ utime = cputime_add(utime, t->utime);
+ stime = cputime_add(stime, t->stime);
r->ru_nvcsw += t->nvcsw;
r->ru_nivcsw += t->nivcsw;
r->ru_minflt += t->min_flt;
@@ -1578,8 +1578,8 @@ void k_getrusage(struct task_struct *p, int who, struct rusage *r)
t = next_thread(t);
} while (t != p);
spin_unlock_irqrestore(&p->sighand->siglock, flags);
- jiffies_to_timeval(utime, &r->ru_utime);
- jiffies_to_timeval(stime, &r->ru_stime);
+ cputime_to_timeval(utime, &r->ru_utime);
+ cputime_to_timeval(stime, &r->ru_stime);
break;
default:
BUG();
diff --git a/kernel/timer.c b/kernel/timer.c
index ec35a6e801a8e5..6bb47b0e498359 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -806,59 +806,6 @@ static void update_wall_time(unsigned long ticks)
} while (ticks);
}
-static inline void do_process_times(struct task_struct *p,
- unsigned long user, unsigned long system)
-{
- unsigned long psecs;
-
- psecs = (p->utime += user);
- psecs += (p->stime += system);
- if (p->signal && !unlikely(p->exit_state) &&
- psecs / HZ >= p->signal->rlim[RLIMIT_CPU].rlim_cur) {
- /* Send SIGXCPU every second.. */
- if (!(psecs % HZ))
- send_sig(SIGXCPU, p, 1);
- /* and SIGKILL when we go over max.. */
- if (psecs / HZ >= p->signal->rlim[RLIMIT_CPU].rlim_max)
- send_sig(SIGKILL, p, 1);
- }
-}
-
-static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
-{
- unsigned long it_virt = p->it_virt_value;
-
- if (it_virt) {
- it_virt -= ticks;
- if (!it_virt) {
- it_virt = p->it_virt_incr;
- send_sig(SIGVTALRM, p, 1);
- }
- p->it_virt_value = it_virt;
- }
-}
-
-static inline void do_it_prof(struct task_struct *p)
-{
- unsigned long it_prof = p->it_prof_value;
-
- if (it_prof) {
- if (--it_prof == 0) {
- it_prof = p->it_prof_incr;
- send_sig(SIGPROF, p, 1);
- }
- p->it_prof_value = it_prof;
- }
-}
-
-static void update_one_process(struct task_struct *p, unsigned long user,
- unsigned long system, int cpu)
-{
- do_process_times(p, user, system);
- do_it_virt(p, user);
- do_it_prof(p);
-}
-
/*
* Called from the timer interrupt handler to charge one tick to the current
* process. user_tick is 1 if the tick is user time, 0 for system.
@@ -866,11 +813,17 @@ static void update_one_process(struct task_struct *p, unsigned long user,
void update_process_times(int user_tick)
{
struct task_struct *p = current;
- int cpu = smp_processor_id(), system = user_tick ^ 1;
+ int cpu = smp_processor_id();
- update_one_process(p, user_tick, system, cpu);
+ /* Note: this timer irq context must be accounted for as well. */
+ if (user_tick)
+ account_user_time(p, jiffies_to_cputime(1));
+ else
+ account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
run_local_timers();
- scheduler_tick(user_tick, system);
+ if (rcu_pending(cpu))
+ rcu_check_callbacks(cpu, user_tick);
+ scheduler_tick();
}
/*
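To make the steal-time interface concrete: below is a minimal, hypothetical sketch of how a virtualized architecture's tick handler could feed account_steal_time. The hypervisor query hyp_steal_ticks() is an invented placeholder for whatever counter the platform actually provides; only the account_* calls and the cputime helpers come from this patch.

/*
 * Hypothetical per-tick accounting hook for a virtualized architecture.
 * hyp_steal_ticks() stands in for the hypervisor's involuntary-wait
 * counter and is not part of this patch.
 */
static void arch_account_tick(struct task_struct *p, int user_tick)
{
	cputime_t steal;

	if (user_tick)
		account_user_time(p, jiffies_to_cputime(1));
	else
		account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));

	/* Charge the time the hypervisor ran someone else to "steal",
	 * so /proc/stat shows meaningful cpu usage for the guest. */
	steal = jiffies_to_cputime(hyp_steal_ticks());
	if (!cputime_eq(steal, cputime_zero))
		account_steal_time(p, steal);
}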