Diffstat (limited to 'arch/mips/kvm')
-rw-r--r--  arch/mips/kvm/Kconfig          1
-rw-r--r--  arch/mips/kvm/Makefile         3
-rw-r--r--  arch/mips/kvm/emulate.c        8
-rw-r--r--  arch/mips/kvm/loongson_ipi.c   4
-rw-r--r--  arch/mips/kvm/mips.c          82
-rw-r--r--  arch/mips/kvm/mmu.c           14
-rw-r--r--  arch/mips/kvm/tlb.c            2
-rw-r--r--  arch/mips/kvm/vz.c            12
8 files changed, 75 insertions(+), 51 deletions(-)
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index a77297480f56..91d197bee9c0 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -27,6 +27,7 @@ config KVM
select KVM_MMIO
select MMU_NOTIFIER
select SRCU
+ select INTERVAL_TREE
help
Support for hosting Guest kernels.
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index d3710959da55..21ff75bcdbc4 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -2,9 +2,10 @@
# Makefile for KVM support for MIPS
#
+include $(srctree)/virt/kvm/Makefile.kvm
+
ccflags-y += -Ivirt/kvm -Iarch/mips/kvm
-kvm-y := $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o binary_stats.o)
kvm-$(CONFIG_CPU_HAS_MSA) += msa.o
kvm-y += mips.o emulate.o entry.o \
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 22e745e49b0a..edaec93a1a1f 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -952,16 +952,14 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
if (!vcpu->arch.pending_exceptions) {
kvm_vz_lose_htimer(vcpu);
vcpu->arch.wait = 1;
- kvm_vcpu_block(vcpu);
+ kvm_vcpu_halt(vcpu);
/*
- * We we are runnable, then definitely go off to user space to
+ * We are runnable, then definitely go off to user space to
* check if any I/O interrupts are pending.
*/
- if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+ if (kvm_arch_vcpu_runnable(vcpu))
vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
- }
}
return EMULATE_DONE;
diff --git a/arch/mips/kvm/loongson_ipi.c b/arch/mips/kvm/loongson_ipi.c
index 3681fc8fba38..5d53f32d837c 100644
--- a/arch/mips/kvm/loongson_ipi.c
+++ b/arch/mips/kvm/loongson_ipi.c
@@ -120,7 +120,7 @@ static int loongson_vipi_write(struct loongson_kvm_ipi *ipi,
s->status |= data;
irq.cpu = id;
irq.irq = 6;
- kvm_vcpu_ioctl_interrupt(kvm->vcpus[id], &irq);
+ kvm_vcpu_ioctl_interrupt(kvm_get_vcpu(kvm, id), &irq);
break;
case CORE0_CLEAR_OFF:
@@ -128,7 +128,7 @@ static int loongson_vipi_write(struct loongson_kvm_ipi *ipi,
if (!s->status) {
irq.cpu = id;
irq.irq = -6;
- kvm_vcpu_ioctl_interrupt(kvm->vcpus[id], &irq);
+ kvm_vcpu_ioctl_interrupt(kvm_get_vcpu(kvm, id), &irq);
}
break;
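
The direct kvm->vcpus[id] dereferences above are replaced with kvm_get_vcpu(), which bounds-checks the index against online_vcpus and may return NULL. A hedged sketch of the defensive call-site pattern (illustrative only; the IPI code relies on ids validated earlier, so this patch does not add the NULL check):

	struct kvm_vcpu *dvcpu = kvm_get_vcpu(kvm, id);

	if (!dvcpu)	/* id out of range, or that vCPU not created yet */
		return -EINVAL;
	kvm_vcpu_ioctl_interrupt(dvcpu, &irq);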
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index aa20d074d388..a25e0b73ee70 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -171,25 +171,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
return 0;
}
-void kvm_mips_free_vcpus(struct kvm *kvm)
-{
- unsigned int i;
- struct kvm_vcpu *vcpu;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- kvm_vcpu_destroy(vcpu);
- }
-
- mutex_lock(&kvm->lock);
-
- for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
- kvm->vcpus[i] = NULL;
-
- atomic_set(&kvm->online_vcpus, 0);
-
- mutex_unlock(&kvm->lock);
-}
-
static void kvm_mips_free_gpa_pt(struct kvm *kvm)
{
/* It should always be safe to remove after flushing the whole range */
@@ -199,7 +180,7 @@ static void kvm_mips_free_gpa_pt(struct kvm *kvm)
void kvm_arch_destroy_vm(struct kvm *kvm)
{
- kvm_mips_free_vcpus(kvm);
+ kvm_destroy_vcpus(kvm);
kvm_mips_free_gpa_pt(kvm);
}
@@ -233,25 +214,20 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
int needs_flush;
- kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
- __func__, kvm, mem->slot, mem->guest_phys_addr,
- mem->memory_size, mem->userspace_addr);
-
/*
* If dirty page logging is enabled, write protect all pages in the slot
* ready for dirty logging.
@@ -438,6 +414,24 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
return -ENOIOCTLCMD;
}
+/*
+ * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
+ * the vCPU is running.
+ *
+ * This must be noinstr as instrumentation may make use of RCU, and this is not
+ * safe during the EQS.
+ */
+static int noinstr kvm_mips_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ guest_state_enter_irqoff();
+ ret = kvm_mips_callbacks->vcpu_run(vcpu);
+ guest_state_exit_irqoff();
+
+ return ret;
+}
+
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
int r = -EINTR;
@@ -458,7 +452,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
lose_fpu(1);
local_irq_disable();
- guest_enter_irqoff();
+ guest_timing_enter_irqoff();
trace_kvm_enter(vcpu);
/*
@@ -469,10 +463,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
- r = kvm_mips_callbacks->vcpu_run(vcpu);
+ r = kvm_mips_vcpu_enter_exit(vcpu);
+
+ /*
+ * We must ensure that any pending interrupts are taken before
+ * we exit guest timing so that timer ticks are accounted as
+ * guest time. Transiently unmask interrupts so that any
+ * pending interrupts are taken.
+ *
+ * TODO: is there a barrier which ensures that pending interrupts are
+ * recognised? Currently this just hopes that the CPU takes any pending
+ * interrupts between the enable and disable.
+ */
+ local_irq_enable();
+ local_irq_disable();
trace_kvm_out(vcpu);
- guest_exit_irqoff();
+ guest_timing_exit_irqoff();
local_irq_enable();
out:
@@ -498,7 +505,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
if (irq->cpu == -1)
dvcpu = vcpu;
else
- dvcpu = vcpu->kvm->vcpus[irq->cpu];
+ dvcpu = kvm_get_vcpu(vcpu->kvm, irq->cpu);
if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
kvm_mips_callbacks->queue_io_int(dvcpu, irq);
@@ -1192,7 +1199,7 @@ static void kvm_mips_set_c0_status(void)
/*
* Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
*/
-int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
+static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
u32 cause = vcpu->arch.host_cp0_cause;
@@ -1381,6 +1388,17 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
return ret;
}
+int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ guest_state_exit_irqoff();
+ ret = __kvm_mips_handle_exit(vcpu);
+ guest_state_enter_irqoff();
+
+ return ret;
+}
+
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 1bfd1b501d82..74cd64a24d05 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -80,7 +80,7 @@ pgd_t *kvm_pgd_alloc(void)
{
pgd_t *ret;
- ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER);
+ ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER);
if (ret)
kvm_pgd_init(ret);
@@ -615,17 +615,17 @@ retry:
* Used to check for invalidations in progress, of the pfn that is
* returned by pfn_to_pfn_prot below.
*/
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
/*
- * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
- * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+ * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads
+ * in gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
* risk the page we get a reference to getting unmapped before we have a
- * chance to grab the mmu_lock without mmu_notifier_retry() noticing.
+ * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
*
* This smp_rmb() pairs with the effective smp_wmb() of the combination
* of the pte_unmap_unlock() after the PTE is zapped, and the
* spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
- * mmu_notifier_seq is incremented.
+ * mmu_invalidate_seq is incremented.
*/
smp_rmb();
@@ -638,7 +638,7 @@ retry:
spin_lock(&kvm->mmu_lock);
/* Check if an invalidation has taken place since we got pfn */
- if (mmu_notifier_retry(kvm, mmu_seq)) {
+ if (mmu_invalidate_retry(kvm, mmu_seq)) {
/*
* This can happen when mappings are changed asynchronously, but
* also synchronously if a COW is triggered by
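
The comment rewrite above documents the invalidation-retry protocol under its new mmu_invalidate_* names. In skeleton form, condensed from kvm_mips_map_page() in this file (error handling and protection bits elided):

	retry:
		mmu_seq = kvm->mmu_invalidate_seq;	/* snapshot before pfn lookup */
		smp_rmb();			/* don't reorder against the PTE reads */

		pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);

		spin_lock(&kvm->mmu_lock);
		if (mmu_invalidate_retry(kvm, mmu_seq)) {
			/* An invalidation ran between snapshot and lock: redo. */
			spin_unlock(&kvm->mmu_lock);
			kvm_release_pfn_clean(pfn);
			goto retry;
		}
		/* ... install the mapping under mmu_lock ... */
		spin_unlock(&kvm->mmu_lock);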
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index a3b50d5e3b25..4e91971daae1 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -153,7 +153,7 @@ EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);
* kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
* @vcpu: KVM VCPU pointer.
* @gpa: Guest virtual address in a TLB mapped guest segment.
- * @gpa: Ponter to output guest physical address it maps to.
+ * @gpa: Pointer to output guest physical address it maps to.
*
* Converts a guest virtual address in a guest TLB mapped segment to a guest
* physical address, by probing the guest TLB.
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 4adca5abbc72..c706f5890a05 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -458,8 +458,8 @@ void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
/**
* _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
* @vcpu: Virtual CPU.
- * @compare: Pointer to write compare value to.
- * @cause: Pointer to write cause value to.
+ * @out_compare: Pointer to write compare value to.
+ * @out_cause: Pointer to write cause value to.
*
* Save VZ guest timer state and switch to software emulation of guest CP0
* timer. The hard timer must already be in use, so preemption should be
@@ -1541,11 +1541,14 @@ static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
}
/**
- * kvm_trap_vz_handle_cop_unusuable() - Guest used unusable coprocessor.
+ * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
* @vcpu: Virtual CPU context.
*
* Handle when the guest attempts to use a coprocessor which hasn't been allowed
* by the root context.
+ *
+ * Return: value indicating whether to resume the host or the guest
+ * (RESUME_HOST or RESUME_GUEST)
*/
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
@@ -1592,6 +1595,9 @@ static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
*
* Handle when the guest attempts to use MSA when it is disabled in the root
* context.
+ *
+ * Return: value indicating whether to resume the host or the guest
+ * (RESUME_HOST or RESUME_GUEST)
*/
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
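
The new Return: annotations describe the common shape of VZ exit handlers: RESUME_GUEST re-enters the guest, while RESUME_HOST drops to userspace after an exit reason has been set. An illustrative skeleton (the predicate name is invented for the sketch):

	static int example_vz_exit_handler(struct kvm_vcpu *vcpu)
	{
		if (emulated_in_kernel(vcpu))	/* hypothetical helper */
			return RESUME_GUEST;	/* re-enter the guest */

		/* Otherwise hand the exit to userspace. */
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}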