author     Martin Schwidefsky <schwidefsky@de.ibm.com>  2015-01-14 17:52:10 +0100
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2015-01-22 12:16:01 +0100
commit     10ad34bc76dfbc49bda327a13012f6754c0c72e0 (patch)
tree       4d76fb6882da2eb3e77b53c4fe64f6367ffbd79d /arch/s390/kernel/vtime.c
parent     s390: avoid z13 cache aliasing (diff)
s390: add SMT support
The multi-threading facility is introduced with the z13 processor family. This patch adds code to detect the multi-threading facility. With the facility enabled, each core will surface multiple hardware threads to the system. Each hardware thread looks like a normal CPU to the operating system with all its registers and properties.

The SCLP interface reports the SMT topology indirectly via the maximum thread id. Each reported CPU in the result of a read-scp-information is a core representing a number of hardware threads.

To reflect the reduced CPU capacity when two hardware threads run on a single core, the MT utilization counter set is used to normalize the raw cputime obtained by the CPU timer deltas. This scaled cputime is reported via the taskstats interface. The normal /proc/stat numbers are based on the raw cputime and are not affected by the normalization.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
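As a rough illustration of the normalization described above: the per-thread cycle deltas read from the MT utilization counter set are summed once unweighted into mult and once weighted by (index + 1) into div, and the raw cputime is then scaled by mult/div. The following is a minimal stand-alone user-space sketch of that arithmetic with assumed sample values; it is not kernel code, and the variable names are illustrative only.

/*
 * Stand-alone sketch of the MT utilization scaling arithmetic used in
 * this patch. In the kernel the deltas come from the stcctm5 counter
 * set and the factors live in the mt_scaling_mult / mt_scaling_div
 * per-CPU variables.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assumed cycle deltas for a core with two hardware threads */
	uint64_t delta[2] = { 800, 200 };
	uint64_t raw_cputime = 1000;	/* raw CPU timer delta */
	uint64_t mult = 0, div = 0;

	/* mult = sum of deltas, div = sum of (i + 1) * delta, as in the patch */
	for (int i = 0; i < 2; i++) {
		mult += delta[i];
		div += (uint64_t)(i + 1) * delta[i];
	}

	/* Scaled cputime reflects the reduced per-thread capacity */
	uint64_t scaled = raw_cputime * mult / div;

	printf("mult=%llu div=%llu scaled=%llu\n",
	       (unsigned long long)mult, (unsigned long long)div,
	       (unsigned long long)scaled);
	/* With these sample values: mult=1000, div=1200, scaled=833 */
	return 0;
}

When all of the delta falls into the first counter the two sums are equal and the scaling factor is 1; the more weight shifts toward the higher counters, the larger div grows relative to mult and the more the raw cputime is reduced.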
Diffstat (limited to 'arch/s390/kernel/vtime.c')
-rw-r--r--  arch/s390/kernel/vtime.c  58
1 file changed, 54 insertions(+), 4 deletions(-)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index e34122e539a1..e53d3595a7c8 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -15,6 +15,8 @@
#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>
+#include <asm/cpu_mf.h>
+#include <asm/smp.h>
static void virt_timer_expire(void);
@@ -23,6 +25,10 @@ static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;
+static DEFINE_PER_CPU(u64, mt_cycles[32]);
+static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
+static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
+
static inline u64 get_vtimer(void)
{
u64 timer;
@@ -61,6 +67,8 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
struct thread_info *ti = task_thread_info(tsk);
u64 timer, clock, user, system, steal;
+ u64 user_scaled, system_scaled;
+ int i;
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
@@ -76,15 +84,49 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
+ /* Do MT utilization calculation */
+ if (smp_cpu_mtid) {
+ u64 cycles_new[32], *cycles_old;
+ u64 delta, mult, div;
+
+ cycles_old = this_cpu_ptr(mt_cycles);
+ if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
+ mult = div = 0;
+ for (i = 0; i <= smp_cpu_mtid; i++) {
+ delta = cycles_new[i] - cycles_old[i];
+ mult += delta;
+ div += (i + 1) * delta;
+ }
+ if (mult > 0) {
+ /* Update scaling factor */
+ __this_cpu_write(mt_scaling_mult, mult);
+ __this_cpu_write(mt_scaling_div, div);
+ memcpy(cycles_old, cycles_new,
+ sizeof(u64) * (smp_cpu_mtid + 1));
+ }
+ }
+ }
+
user = S390_lowcore.user_timer - ti->user_timer;
S390_lowcore.steal_timer -= user;
ti->user_timer = S390_lowcore.user_timer;
- account_user_time(tsk, user, user);
system = S390_lowcore.system_timer - ti->system_timer;
S390_lowcore.steal_timer -= system;
ti->system_timer = S390_lowcore.system_timer;
- account_system_time(tsk, hardirq_offset, system, system);
+
+ user_scaled = user;
+ system_scaled = system;
+ /* Do MT utilization scaling */
+ if (smp_cpu_mtid) {
+ u64 mult = __this_cpu_read(mt_scaling_mult);
+ u64 div = __this_cpu_read(mt_scaling_div);
+
+ user_scaled = (user_scaled * mult) / div;
+ system_scaled = (system_scaled * mult) / div;
+ }
+ account_user_time(tsk, user, user_scaled);
+ account_system_time(tsk, hardirq_offset, system, system_scaled);
steal = S390_lowcore.steal_timer;
if ((s64) steal > 0) {
@@ -126,7 +168,7 @@ void vtime_account_user(struct task_struct *tsk)
void vtime_account_irq_enter(struct task_struct *tsk)
{
struct thread_info *ti = task_thread_info(tsk);
- u64 timer, system;
+ u64 timer, system, system_scaled;
timer = S390_lowcore.last_update_timer;
S390_lowcore.last_update_timer = get_vtimer();
@@ -135,7 +177,15 @@ void vtime_account_irq_enter(struct task_struct *tsk)
system = S390_lowcore.system_timer - ti->system_timer;
S390_lowcore.steal_timer -= system;
ti->system_timer = S390_lowcore.system_timer;
- account_system_time(tsk, 0, system, system);
+ system_scaled = system;
+ /* Do MT utilization scaling */
+ if (smp_cpu_mtid) {
+ u64 mult = __this_cpu_read(mt_scaling_mult);
+ u64 div = __this_cpu_read(mt_scaling_div);
+
+ system_scaled = (system_scaled * mult) / div;
+ }
+ account_system_time(tsk, 0, system, system_scaled);
virt_timer_forward(system);
}