author    Frederic Weisbecker <fweisbec@gmail.com>   2017-01-31 04:09:37 +0100
committer Ingo Molnar <mingo@kernel.org>             2017-02-01 09:13:56 +0100
commit    23244a5c8003d4154161a8289a7d3783b0237c08 (patch)
tree      3f9b562bcfdab20333a14b408891272997080125 /kernel/sched/cputime.c
parent    sched/cputime: Remove temporary cputime_t accessors (diff)
download  linux-dev-23244a5c8003d4154161a8289a7d3783b0237c08.tar.xz
          linux-dev-23244a5c8003d4154161a8289a7d3783b0237c08.zip
sched/cputime: Push time to account_user_time() in nsecs
This is one more step toward converting cputime accounting to pure nsecs.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Link: http://lkml.kernel.org/r/1485832191-26889-22-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
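For illustration only (not part of the commit): a minimal user-space C sketch of the calling convention this patch introduces. account_user_time() now takes a plain u64 of nanoseconds, so tick-based callers subtract steal/IRQ/softirq time while still in cputime_t and convert once with cputime_to_nsecs() at the call site. NSEC_PER_TICK, struct task and main() below are stand-ins invented for the sketch; only the calling pattern mirrors the patched code.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef unsigned long cputime_t;        /* jiffies-backed cputime_t, as on HZ-tick configs */

#define NSEC_PER_TICK   10000000ULL     /* assumption: HZ=100, i.e. 10 ms per tick */

static inline u64 cputime_to_nsecs(cputime_t ct)
{
        return (u64)ct * NSEC_PER_TICK;
}

struct task {
        u64 utime;                      /* user time, now kept in nsecs */
};

/* New signature: the time argument arrives already converted to nanoseconds. */
static void account_user_time(struct task *p, u64 cputime)
{
        p->utime += cputime;            /* no per-field cputime_to_nsecs() needed here */
}

int main(void)
{
        struct task p = { .utime = 0 };
        cputime_t old_cputime = 3;      /* three ticks accounted at once */
        cputime_t other = 1;            /* e.g. steal/irq time, still in ticks */

        if (other < old_cputime) {
                old_cputime -= other;
                /* single conversion point, as in the patched tick paths */
                account_user_time(&p, cputime_to_nsecs(old_cputime));
        }
        printf("p.utime = %llu ns\n", (unsigned long long)p.utime);
        return 0;
}

Built with any C compiler, this prints 20000000 ns for the two remaining ticks, matching the convert-once flow that irqtime_account_process_tick() and account_process_tick() follow after this patch.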
Diffstat
-rw-r--r--   kernel/sched/cputime.c   42
1 file changed, 24 insertions, 18 deletions
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index f7b9624c7df0..55d31c35833a 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -116,18 +116,18 @@ static inline void task_group_account_field(struct task_struct *p, int index,
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in user space since the last update
*/
-void account_user_time(struct task_struct *p, cputime_t cputime)
+void account_user_time(struct task_struct *p, u64 cputime)
{
int index;
/* Add user time to process. */
- p->utime += cputime_to_nsecs(cputime);
- account_group_user_time(p, cputime_to_nsecs(cputime));
+ p->utime += cputime;
+ account_group_user_time(p, cputime);
index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
/* Add user time to cpustat. */
- task_group_account_field(p, index, cputime_to_nsecs(cputime));
+ task_group_account_field(p, index, cputime);
/* Account for user time used */
acct_account_cputime(p);
@@ -363,8 +363,9 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
struct rq *rq, int ticks)
{
- u64 cputime = (__force u64) cputime_one_jiffy * ticks;
+ u64 old_cputime = (__force u64) cputime_one_jiffy * ticks;
cputime_t other;
+ u64 cputime;
/*
* When returning from idle, many ticks can get accounted at
@@ -374,9 +375,11 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
* other time can exceed ticks occasionally.
*/
other = account_other_time(ULONG_MAX);
- if (other >= cputime)
+ if (other >= old_cputime)
return;
- cputime -= other;
+
+ old_cputime -= other;
+ cputime = cputime_to_nsecs(old_cputime);
if (this_cpu_ksoftirqd() == p) {
/*
@@ -384,15 +387,16 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
* So, we have to handle it separately here.
* Also, p->stime needs to be updated for ksoftirqd.
*/
- account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
+ account_system_index_time(p, old_cputime, CPUTIME_SOFTIRQ);
} else if (user_tick) {
account_user_time(p, cputime);
} else if (p == rq->idle) {
- account_idle_time(cputime);
+ account_idle_time(old_cputime);
} else if (p->flags & PF_VCPU) { /* System time or guest time */
- account_guest_time(p, cputime);
+
+ account_guest_time(p, old_cputime);
} else {
- account_system_index_time(p, cputime, CPUTIME_SYSTEM);
+ account_system_index_time(p, old_cputime, CPUTIME_SYSTEM);
}
}
@@ -473,7 +477,8 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
*/
void account_process_tick(struct task_struct *p, int user_tick)
{
- cputime_t cputime, steal;
+ cputime_t old_cputime, steal;
+ u64 cputime;
struct rq *rq = this_rq();
if (vtime_accounting_cpu_enabled())
@@ -484,20 +489,21 @@ void account_process_tick(struct task_struct *p, int user_tick)
return;
}
- cputime = cputime_one_jiffy;
+ old_cputime = cputime_one_jiffy;
steal = steal_account_process_time(ULONG_MAX);
- if (steal >= cputime)
+ if (steal >= old_cputime)
return;
- cputime -= steal;
+ old_cputime -= steal;
+ cputime = cputime_to_nsecs(old_cputime);
if (user_tick)
account_user_time(p, cputime);
else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
- account_system_time(p, HARDIRQ_OFFSET, cputime);
+ account_system_time(p, HARDIRQ_OFFSET, old_cputime);
else
- account_idle_time(cputime);
+ account_idle_time(old_cputime);
}
/*
@@ -736,7 +742,7 @@ void vtime_account_user(struct task_struct *tsk)
tsk->vtime_snap_whence = VTIME_SYS;
if (vtime_delta(tsk)) {
delta_cpu = get_vtime_delta(tsk);
- account_user_time(tsk, delta_cpu);
+ account_user_time(tsk, cputime_to_nsecs(delta_cpu));
}
write_seqcount_end(&tsk->vtime_seqcount);
}