Diffstat (limited to 'arch/arm64/kvm/arch_timer.c')
 arch/arm64/kvm/arch_timer.c | 63 ++++++++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 40 insertions(+), 23 deletions(-)
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 3df67c127489..bb24a76b4224 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -208,18 +208,16 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
+static u64 kvm_counter_compute_delta(struct arch_timer_context *timer_ctx,
+ u64 val)
{
- u64 cval, now;
+ u64 now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);
- cval = timer_get_cval(timer_ctx);
- now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);
-
- if (now < cval) {
+ if (now < val) {
u64 ns;
ns = cyclecounter_cyc2ns(timecounter->cc,
- cval - now,
+ val - now,
timecounter->mask,
&timecounter->frac);
return ns;
@@ -228,6 +226,11 @@ static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
return 0;
}
+static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
+{
+ return kvm_counter_compute_delta(timer_ctx, timer_get_cval(timer_ctx));
+}
+
static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
	WARN_ON(timer_ctx && timer_ctx->loaded);
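
For reference, the cycles-to-nanoseconds conversion used by kvm_counter_compute_delta() is the generic cyclecounter helper. Abridged from include/linux/timecounter.h of this era: the counter's mult/shift pair scales the delta, and *frac carries the sub-nanosecond remainder between calls.

static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
				      u64 cycles, u64 mask, u64 *frac)
{
	u64 ns = (u64) cycles;

	ns = (ns * cc->mult) + *frac;
	*frac = ns & mask;
	return ns >> cc->shift;
}
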
@@ -236,6 +239,20 @@ static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
(ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
}
+static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
+{
+ return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
+ vcpu_get_flag(vcpu, IN_WFIT));
+}
+
+static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_context *ctx = vcpu_vtimer(vcpu);
+ u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
+
+ return kvm_counter_compute_delta(ctx, val);
+}
+
/*
* Returns the earliest expiration time in ns among guest timers.
* Note that it will return 0 if none of timers can fire.
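
The two helpers above back the FEAT_WFxT "WFIT Xt" instruction: a WFI that also completes once the virtual counter reaches an absolute deadline held in the source register. wfit_delay_ns() recovers that register from the trapped instruction (kvm_vcpu_sys_get_rt() is reusable here, presumably because the WFxT trap ISS encodes the register index in the same field as system-register traps) and converts the remaining cycles to nanoseconds. An illustrative pseudo-C sketch of the guest-visible semantics, with hypothetical helper names, not kernel code:

/* Pseudo-code sketch of WFIT as seen by the guest: wait for an
 * interrupt, but also complete once CNTVCT_EL0 reaches the absolute
 * deadline passed in Xt. */
void wfit(u64 deadline)
{
	while (!interrupt_pending() && read_cntvct_el0() < deadline)
		wait_for_event();	/* may wake spuriously, like WFI */
}
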
@@ -253,6 +270,9 @@ static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
}
+ if (vcpu_has_wfit_active(vcpu))
+ min_delta = min(min_delta, wfit_delay_ns(vcpu));
+
/* If none of timers can fire, then return 0 */
if (min_delta == ULLONG_MAX)
return 0;
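
With the WFIT deadline folded into kvm_timer_earliest_exp(), the background hrtimer armed when the vcpu blocks also covers WFIT timeouts. Abridged from kvm_timer_blocking() later in this file:

	/* Arm the background hrtimer for the earliest wake-up among the
	 * guest timers, which now includes an active WFIT deadline. */
	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
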
@@ -350,15 +370,9 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
return cval <= now;
}
-bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- struct timer_map map;
-
- get_timer_map(vcpu, &map);
-
- return kvm_timer_should_fire(map.direct_vtimer) ||
- kvm_timer_should_fire(map.direct_ptimer) ||
- kvm_timer_should_fire(map.emul_ptimer);
+ return vcpu_has_wfit_active(vcpu) && wfit_delay_ns(vcpu) == 0;
}
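
kvm_timer_is_pending() is replaced by a direct implementation of the generic kvm_cpu_has_pending_timer() hook. The hardware-backed timers raise real interrupts delivered through the vgic, so the only case the hook still needs to report is a WFIT deadline that has already passed, which would never raise an interrupt on its own. The IN_WFIT flag is set on the trap path; a simplified sketch, based on the WFxT handling added to kvm_handle_wfx() in arch/arm64/kvm/handle_exit.c in the same series (the real handler also validates the register field before blocking):

	/* Simplified: mark the vcpu as blocked on WFIT before the
	 * generic halt path runs, so the timer code can see it. */
	if (esr & ESR_ELx_WFx_ISS_WFxT)
		vcpu_set_flag(vcpu, IN_WFIT);
	kvm_vcpu_wfi(vcpu);
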
/*
@@ -467,7 +481,7 @@ out:
}
/*
- * Schedule the background timer before calling kvm_vcpu_block, so that this
+ * Schedule the background timer before calling kvm_vcpu_halt, so that this
* thread is removed from its waitqueue and made runnable when there's a timer
* interrupt to handle.
*/
@@ -484,7 +498,8 @@ static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
*/
if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
!kvm_timer_irq_can_fire(map.direct_ptimer) &&
- !kvm_timer_irq_can_fire(map.emul_ptimer))
+ !kvm_timer_irq_can_fire(map.emul_ptimer) &&
+ !vcpu_has_wfit_active(vcpu))
return;
/*
@@ -649,7 +664,6 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct timer_map map;
- struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
if (unlikely(!timer->enabled))
return;
@@ -672,7 +686,7 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
if (map.emul_ptimer)
soft_timer_cancel(&map.emul_ptimer->hrtimer);
- if (rcuwait_active(wait))
+ if (kvm_vcpu_is_blocking(vcpu))
kvm_timer_blocking(vcpu);
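
kvm_vcpu_is_blocking() is the generic replacement for the open-coded rcuwait check it supersedes; it wraps the same test. Roughly, from include/linux/kvm_host.h of this era:

static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
{
	return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
}
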
/*
@@ -750,7 +764,7 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
- int i;
+ unsigned long i;
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *tmp;
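
The int to unsigned long changes for the iterator variables here and below track the generic KVM move of the vcpu table to an xarray, whose iteration index is unsigned long. Abridged from include/linux/kvm_host.h after that conversion:

#define kvm_for_each_vcpu(idx, vcpup, kvm)				\
	xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0,		\
			  atomic_read(&kvm->online_vcpus) - 1)
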
@@ -1189,8 +1203,8 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
- int vtimer_irq, ptimer_irq;
- int i, ret;
+ int vtimer_irq, ptimer_irq, ret;
+ unsigned long i;
vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
@@ -1216,6 +1230,9 @@ bool kvm_arch_timer_get_input_level(int vintid)
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
struct arch_timer_context *timer;
+ if (WARN(!vcpu, "No vcpu context!\n"))
+ return false;
+
if (vintid == vcpu_vtimer(vcpu)->irq.irq)
timer = vcpu_vtimer(vcpu);
else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
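
The new WARN covers callers that reach this point without a running vcpu (for example, vgic accesses from a userspace or device context). kvm_get_running_vcpu() only returns a vcpu while one is loaded on the current physical CPU; abridged from virt/kvm/kvm_main.c:

/* Returns the vcpu running on this physical CPU, or NULL when called
 * from a context where no vcpu is loaded. */
struct kvm_vcpu *kvm_get_running_vcpu(void)
{
	return __this_cpu_read(kvm_running_vcpu);
}
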
@@ -1297,7 +1314,7 @@ void kvm_timer_init_vhe(void)
static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;