path: root/virt/kvm/arm/vgic/vgic.c
author		Christoffer Dall <cdall@linaro.org>	2017-03-21 21:16:12 +0100
committer	Christoffer Dall <cdall@linaro.org>	2017-04-09 07:45:32 -0700
commit		90cac1f52ad1db73b6ed99143ce7ad473bd90a95 (patch)
tree		8928ee4f17bf82e88c1a7c0b49a4aa9cbff24f1d /virt/kvm/arm/vgic/vgic.c
parent		KVM: arm/arm64: vgic: Get rid of live_lrs (diff)
KVM: arm/arm64: vgic: Only set underflow when actually out of LRs
We currently assume that all the interrupts in our AP list will be queued to LRs, but that's not necessarily the case, because some of them could have been migrated away to different VCPUs and only the VCPU thread itself can remove interrupts from its AP list.

Therefore, slightly change the logic to only set the underflow interrupt when we actually run out of LRs.

As it turns out, this allows us to further simplify the handling in vgic_sync_hwstate in later patches.

Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <cdall@linaro.org>
Diffstat (limited to 'virt/kvm/arm/vgic/vgic.c')
-rw-r--r--	virt/kvm/arm/vgic/vgic.c	10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 104329139f24..442f7df2a46a 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -601,10 +601,8 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 
 	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
 
-	if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr) {
-		vgic_set_underflow(vcpu);
+	if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr)
 		vgic_sort_ap_list(vcpu);
-	}
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		spin_lock(&irq->irq_lock);
@@ -623,8 +621,12 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 next:
 		spin_unlock(&irq->irq_lock);
 
-		if (count == kvm_vgic_global_state.nr_lr)
+		if (count == kvm_vgic_global_state.nr_lr) {
+			if (!list_is_last(&irq->ap_list,
+					  &vgic_cpu->ap_list_head))
+				vgic_set_underflow(vcpu);
 			break;
+		}
 	}
 
 	vcpu->arch.vgic_cpu.used_lrs = count;
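
Below is a minimal, standalone C sketch (not kernel code) of the behaviour this patch moves to: underflow is requested only when the list registers actually fill up while further entries remain on the AP list, mirroring the !list_is_last() check added above. The struct fake_irq type, the NR_LR constant, and the targets_this_vcpu flag are simplified stand-ins invented for illustration; the real vgic_flush_lr_state() walks struct vgic_irq entries under ap_list_lock.

#include <stdbool.h>
#include <stdio.h>

#define NR_LR 4	/* hypothetical number of list registers */

struct fake_irq {
	int intid;
	bool targets_this_vcpu;	/* false: migrated to another VCPU, skip it */
};

/*
 * Fill at most NR_LR "list registers" from the AP list.  Underflow is
 * signalled only if the LRs run out while more entries follow, i.e. the
 * equivalent of the !list_is_last() check in the patch above.
 */
static int flush_lr_state(const struct fake_irq *ap_list, int depth,
			  bool *underflow)
{
	int count = 0;

	*underflow = false;

	for (int i = 0; i < depth; i++) {
		if (!ap_list[i].targets_this_vcpu)
			continue;	/* not queued on this VCPU's LRs */

		printf("LR%d <- intid %d\n", count, ap_list[i].intid);
		count++;

		if (count == NR_LR) {
			if (i + 1 < depth)	/* entries left over? */
				*underflow = true;
			break;
		}
	}
	return count;
}

int main(void)
{
	const struct fake_irq ap_list[] = {
		{ 32, true }, { 33, false }, { 34, true },
		{ 35, true }, { 36, true }, { 37, true },
	};
	bool underflow;
	int used = flush_lr_state(ap_list, 6, &underflow);

	printf("used_lrs=%d underflow=%s\n", used, underflow ? "yes" : "no");
	return 0;
}

With the old logic, underflow would have been requested up front whenever compute_ap_list_depth() exceeded nr_lr, even if the surplus entries had been migrated to other VCPUs and would never need an LR on this one.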