author     Avi Kivity <avi@qumranet.com>  2007-10-25 16:52:32 +0200
committer  Avi Kivity <avi@qumranet.com>  2008-01-30 18:01:20 +0200
commit     b93463aa59d67b21b4921e30bd5c5dcc7c35ffbd (patch)
tree       8de9a2789624d27155d8a94c93a23c01f473fea7 /arch
parent     KVM: local APIC TPR access reporting facility (diff)
KVM: Accelerated apic support
This adds a mechanism for exposing the virtual apic tpr to the guest, and a
protocol for letting the guest update the tpr without causing a vmexit if
conditions allow (e.g. there is no interrupt pending with a higher priority
than the new tpr).

Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kvm/lapic.c  51
-rw-r--r--  arch/x86/kvm/lapic.h   6
-rw-r--r--  arch/x86/kvm/x86.c    55
3 files changed, 111 insertions(+), 1 deletion(-)
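
One usage note before the diff: the new KVM_SET_VAPIC_ADDR vcpu ioctl (added in the
x86.c hunk below) is how userspace tells KVM where this vcpu's vapic word lives in
guest physical memory. Purely as a sketch, not part of this patch, a userspace VMM
might register that address like this (vcpu_fd and vapic_gpa are assumed to come
from the VMM's own setup code):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Illustrative only: hand the guest-physical address of this vcpu's vapic
 * word to the kernel.  Requires the in-kernel irqchip; otherwise the ioctl
 * fails with EINVAL, matching the check added in kvm_arch_vcpu_ioctl(). */
static int set_vapic_addr(int vcpu_fd, __u64 vapic_gpa)
{
        struct kvm_vapic_addr va = { .vapic_addr = vapic_gpa };

        return ioctl(vcpu_fd, KVM_SET_VAPIC_ADDR, &va);
}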
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 50c3f3a8dd3d..e7513bb98af1 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -815,7 +815,8 @@ void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
         if (!apic)
                 return;
-        apic_set_tpr(apic, ((cr8 & 0x0f) << 4));
+        apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
+                     | (apic_get_reg(apic, APIC_TASKPRI) & 4));
 }
 
 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
@@ -1104,3 +1105,51 @@ void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
         hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
 }
 EXPORT_SYMBOL_GPL(kvm_migrate_apic_timer);
+
+void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
+{
+        u32 data;
+        void *vapic;
+
+        if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
+                return;
+
+        vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
+        data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
+        kunmap_atomic(vapic, KM_USER0);
+
+        apic_set_tpr(vcpu->arch.apic, data & 0xff);
+}
+
+void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
+{
+        u32 data, tpr;
+        int max_irr, max_isr;
+        struct kvm_lapic *apic;
+        void *vapic;
+
+        if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
+                return;
+
+        apic = vcpu->arch.apic;
+        tpr = apic_get_reg(apic, APIC_TASKPRI) & 0xff;
+        max_irr = apic_find_highest_irr(apic);
+        if (max_irr < 0)
+                max_irr = 0;
+        max_isr = apic_find_highest_isr(apic);
+        if (max_isr < 0)
+                max_isr = 0;
+        data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
+
+        vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
+        *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
+        kunmap_atomic(vapic, KM_USER0);
+}
+
+void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
+{
+        if (!irqchip_in_kernel(vcpu->kvm))
+                return;
+
+        vcpu->arch.apic->vapic_addr = vapic_addr;
+}
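
For reference, the 32-bit word that kvm_lapic_sync_to_vapic() publishes packs three
fields: the current TPR in bits 0-7, the priority class (high nibble) of the highest
in-service vector in bits 12-15, and the highest pending IRR vector in bits 24-31.
Decoded on the reader's side, purely as an illustration (these helper names are not
in the tree), that layout is:

#include <stdint.h>

static inline uint32_t vapic_tpr(uint32_t word)
{
        return word & 0xff;             /* current task priority */
}

static inline uint32_t vapic_isr_class(uint32_t word)
{
        return (word >> 8) & 0xf0;      /* class of highest in-service vector */
}

static inline uint32_t vapic_pending_vec(uint32_t word)
{
        return word >> 24;              /* highest pending vector, 0 if none */
}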
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 447b654aefbb..676c396c9cee 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -18,6 +18,8 @@ struct kvm_lapic {
         struct kvm_vcpu *vcpu;
         struct page *regs_page;
         void *regs;
+        gpa_t vapic_addr;
+        struct page *vapic_page;
 };
 int kvm_create_lapic(struct kvm_vcpu *vcpu);
 void kvm_free_lapic(struct kvm_vcpu *vcpu);
@@ -41,4 +43,8 @@ int kvm_lapic_enabled(struct kvm_vcpu *vcpu);
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
 void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
 
+void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
+void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
+void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
+
 #endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c2b80884447e..e7eac27adb7f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1173,6 +1173,19 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                 r = 0;
                 break;
         };
+        case KVM_SET_VAPIC_ADDR: {
+                struct kvm_vapic_addr va;
+
+                r = -EINVAL;
+                if (!irqchip_in_kernel(vcpu->kvm))
+                        goto out;
+                r = -EFAULT;
+                if (copy_from_user(&va, argp, sizeof va))
+                        goto out;
+                r = 0;
+                kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+                break;
+        }
         default:
                 r = -EINVAL;
         }
@@ -2214,6 +2227,9 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
         }
 
         switch (nr) {
+        case KVM_HC_VAPIC_POLL_IRQ:
+                ret = 0;
+                break;
         default:
                 ret = -KVM_ENOSYS;
                 break;
@@ -2421,6 +2437,29 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu,
                  vcpu->arch.irq_summary == 0);
 }
 
+static void vapic_enter(struct kvm_vcpu *vcpu)
+{
+        struct kvm_lapic *apic = vcpu->arch.apic;
+        struct page *page;
+
+        if (!apic || !apic->vapic_addr)
+                return;
+
+        page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+        vcpu->arch.apic->vapic_page = page;
+}
+
+static void vapic_exit(struct kvm_vcpu *vcpu)
+{
+        struct kvm_lapic *apic = vcpu->arch.apic;
+
+        if (!apic || !apic->vapic_addr)
+                return;
+
+        kvm_release_page_dirty(apic->vapic_page);
+        mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+}
+
 static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
         int r;
@@ -2435,6 +2474,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
         }
 
+        vapic_enter(vcpu);
+
 preempted:
         if (vcpu->guest_debug.enabled)
                 kvm_x86_ops->guest_debug_pre(vcpu);
@@ -2444,6 +2485,14 @@ again:
         if (unlikely(r))
                 goto out;
 
+        if (vcpu->requests)
+                if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
+                                       &vcpu->requests)) {
+                        kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
+                        r = 0;
+                        goto out;
+                }
+
         kvm_inject_pending_timer_irqs(vcpu);
 
         preempt_disable();
@@ -2469,6 +2518,8 @@ again:
         else
                 kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
 
+        kvm_lapic_sync_to_vapic(vcpu);
+
         vcpu->guest_mode = 1;
         kvm_guest_enter();
@@ -2506,6 +2557,8 @@ again:
         if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
                 vcpu->arch.exception.pending = false;
 
+        kvm_lapic_sync_from_vapic(vcpu);
+
         r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
 
         if (r > 0) {
@@ -2527,6 +2580,8 @@ out:
         post_kvm_run_save(vcpu, kvm_run);
 
+        vapic_exit(vcpu);
+
         return r;
 }
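
The guest-side half of the protocol is not part of this patch; it ships separately
with the userspace VMM. Purely as a hedged sketch of the intent stated in the commit
message, a guest could lower its TPR through the shared word and only trap to the
host when a pending interrupt might now be deliverable, using the new
KVM_HC_VAPIC_POLL_IRQ hypercall. The word layout below follows
kvm_lapic_sync_to_vapic() above, kvm_hypercall0() is the usual guest hypercall
helper, and guest_set_tpr() is an invented name:

#include <linux/types.h>
#include <linux/kvm_para.h>     /* KVM_HC_VAPIC_POLL_IRQ */
#include <asm/kvm_para.h>       /* kvm_hypercall0(), guest-kernel context */

/* Hypothetical guest-side TPR update against the shared vapic word. */
static void guest_set_tpr(volatile u32 *vapic_word, u8 new_tpr)
{
        u32 w = *vapic_word;
        u8 pending = w >> 24;           /* highest pending vector, 0 if none */

        /* Store the new TPR in place; this write takes no vmexit. */
        *vapic_word = (w & ~0xffu) | new_tpr;

        /* If something is pending above the new priority class, ask the
         * host to re-evaluate interrupt delivery. */
        if (pending && (pending & 0xf0) > (new_tpr & 0xf0))
                kvm_hypercall0(KVM_HC_VAPIC_POLL_IRQ);
}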