Diffstat (limited to 'arch/mips/kvm')
 arch/mips/kvm/Kconfig        |  11
 arch/mips/kvm/Makefile       |   2
 arch/mips/kvm/callback.c     |  14
 arch/mips/kvm/emulate.c      |  30
 arch/mips/kvm/entry.c        | 431
 arch/mips/kvm/fpu.S          |   6
 arch/mips/kvm/interrupt.h    |   4
 arch/mips/kvm/loongson_ipi.c |   2
 arch/mips/kvm/mips.c         |  85
 arch/mips/kvm/mmu.c          |  74
 arch/mips/kvm/stats.c        |   4
 arch/mips/kvm/trace.h        |   8
 arch/mips/kvm/vz.c           |  35
 13 files changed, 286 insertions(+), 420 deletions(-)
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 91d197bee9c0..ab57221fa4dd 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -17,17 +17,16 @@ if VIRTUALIZATION
 config KVM
 	tristate "Kernel-based Virtual Machine (KVM) support"
-	depends on HAVE_KVM
+	depends on CPU_SUPPORTS_VZ
 	depends on MIPS_FP_SUPPORT
 	select EXPORT_UASM
-	select PREEMPT_NOTIFIERS
+	select KVM_COMMON
 	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
-	select HAVE_KVM_EVENTFD
 	select HAVE_KVM_VCPU_ASYNC_IOCTL
 	select KVM_MMIO
-	select MMU_NOTIFIER
-	select SRCU
-	select INTERVAL_TREE
+	select KVM_GENERIC_MMU_NOTIFIER
+	select KVM_GENERIC_HARDWARE_ENABLING
+	select HAVE_KVM_READONLY_MEM
 	help
 	  Support for hosting Guest kernels.
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 21ff75bcdbc4..805aeea2166e 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -17,4 +17,4 @@ kvm-$(CONFIG_CPU_LOONGSON64) += loongson_ipi.o
 kvm-y		+= vz.o
 
 obj-$(CONFIG_KVM)	+= kvm.o
-obj-y			+= callback.o tlb.o
+obj-y			+= tlb.o
diff --git a/arch/mips/kvm/callback.c b/arch/mips/kvm/callback.c
deleted file mode 100644
index d88aa2173fb0..000000000000
--- a/arch/mips/kvm/callback.c
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- * Authors: Yann Le Du <ledu@kymasys.com>
- */
-
-#include <linux/export.h>
-#include <linux/kvm_host.h>
-
-struct kvm_mips_callbacks *kvm_mips_callbacks;
-EXPORT_SYMBOL_GPL(kvm_mips_callbacks);
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index b494d8d39290..0feec52222fb 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -312,7 +312,7 @@ int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
  */
 int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 
 	return	(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
 		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
@@ -384,7 +384,7 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
  */
 static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	ktime_t expires, threshold;
 	u32 count, compare;
 	int running;
@@ -444,7 +444,7 @@ static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
  */
 u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 
 	/* If count disabled just read static copy of count */
 	if (kvm_mips_count_disabled(vcpu))
@@ -502,7 +502,7 @@ ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
 static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
 				    ktime_t now, u32 count)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	u32 compare;
 	u64 delta;
 	ktime_t expire;
@@ -531,7 +531,7 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
  * to be used for a period of time, but the exact ktime corresponding to the
  * final Count that must be restored is not known.
  *
- * It is gauranteed that a timer interrupt immediately after restore will be
+ * It is guaranteed that a timer interrupt immediately after restore will be
  * handled, but not if CP0_Compare is exactly at @count. That case should
 * already be handled when the hardware timer state is saved.
 *
@@ -603,7 +603,7 @@ resume:
  */
 void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	ktime_t now;
 
 	/* Calculate bias */
@@ -649,7 +649,7 @@ void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
  */
 int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	int dc;
 	ktime_t now;
 	u32 count;
@@ -696,7 +696,7 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
  */
 void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	int dc;
 	u32 old_compare = kvm_read_c0_guest_compare(cop0);
 	s32 delta = compare - old_compare;
@@ -779,7 +779,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
  */
 static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	u32 count;
 	ktime_t now;
@@ -806,7 +806,7 @@ static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
  */
 void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 
 	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
 	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
@@ -826,7 +826,7 @@ void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
  */
 void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	u32 count;
 
 	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
@@ -852,7 +852,7 @@ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
  */
 int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
 	s64 delta;
 	ktime_t expire, now;
@@ -955,13 +955,11 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 		kvm_vcpu_halt(vcpu);
 
 		/*
-		 * We we are runnable, then definitely go off to user space to
+		 * We are runnable, then definitely go off to user space to
 		 * check if any I/O interrupts are pending.
 		 */
-		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
-			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+		if (kvm_arch_vcpu_runnable(vcpu))
 			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
-		}
 	}
 
 	return EMULATE_DONE;
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index aceed14aa1f7..ac8e074c6bb7 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -13,70 +13,17 @@
 #include <linux/kvm_host.h>
 #include <linux/log2.h>
+#include <asm/mipsregs.h>
 #include <asm/mmu_context.h>
 #include <asm/msa.h>
+#include <asm/regdef.h>
 #include <asm/setup.h>
 #include <asm/tlbex.h>
 #include <asm/uasm.h>
 
-/* Register names */
-#define ZERO		0
-#define AT		1
-#define V0		2
-#define V1		3
-#define A0		4
-#define A1		5
-
-#if _MIPS_SIM == _MIPS_SIM_ABI32
-#define T0		8
-#define T1		9
-#define T2		10
-#define T3		11
-#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
-
-#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
-#define T0		12
-#define T1		13
-#define T2		14
-#define T3		15
-#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
-
-#define S0		16
-#define S1		17
-#define T9		25
-#define K0		26
-#define K1		27
-#define GP		28
-#define SP		29
-#define RA		31
-
-/* Some CP0 registers */
-#define C0_PWBASE	5, 5
-#define C0_HWRENA	7, 0
-#define C0_BADVADDR	8, 0
-#define C0_BADINSTR	8, 1
-#define C0_BADINSTRP	8, 2
-#define C0_PGD		9, 7
-#define C0_ENTRYHI	10, 0
-#define C0_GUESTCTL1	10, 4
-#define C0_STATUS	12, 0
-#define C0_GUESTCTL0	12, 6
-#define C0_CAUSE	13, 0
-#define C0_EPC		14, 0
-#define C0_EBASE	15, 1
-#define C0_CONFIG5	16, 5
-#define C0_DDATA_LO	28, 3
-#define C0_ERROREPC	30, 0
-
 #define CALLFRAME_SIZ   32
 
-#ifdef CONFIG_64BIT
-#define ST0_KX_IF_64	ST0_KX
-#else
-#define ST0_KX_IF_64	0
-#endif
-
-static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
+static unsigned int scratch_vcpu[2] = { C0_DDATALO };
 static unsigned int scratch_tmp[2] = { C0_ERROREPC };
 
 enum label_id {
@@ -212,60 +159,60 @@ void *kvm_mips_build_vcpu_run(void *addr)
 	unsigned int i;
 
 	/*
-	 * A0: vcpu
+	 * GPR_A0: vcpu
 	 */
 
 	/* k0/k1 not being used in host kernel context */
-	UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
+	UASM_i_ADDIU(&p, GPR_K1, GPR_SP, -(int)sizeof(struct pt_regs));
 	for (i = 16; i < 32; ++i) {
 		if (i == 24)
 			i = 28;
-		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
+		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), GPR_K1);
 	}
 
 	/* Save host status */
-	uasm_i_mfc0(&p, V0, C0_STATUS);
-	UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);
+	uasm_i_mfc0(&p, GPR_V0, C0_STATUS);
+	UASM_i_SW(&p, GPR_V0, offsetof(struct pt_regs, cp0_status), GPR_K1);
 
 	/* Save scratch registers, will be used to store pointer to vcpu etc */
-	kvm_mips_build_save_scratch(&p, V1, K1);
+	kvm_mips_build_save_scratch(&p, GPR_V1, GPR_K1);
 
 	/* VCPU scratch register has pointer to vcpu */
-	UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]);
+	UASM_i_MTC0(&p, GPR_A0, scratch_vcpu[0], scratch_vcpu[1]);
 
 	/* Offset into vcpu->arch */
-	UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch));
+	UASM_i_ADDIU(&p, GPR_K1, GPR_A0, offsetof(struct kvm_vcpu, arch));
 
 	/*
 	 * Save the host stack to VCPU, used for exception processing
 	 * when we exit from the Guest
 	 */
-	UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
+	UASM_i_SW(&p, GPR_SP, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1);
 
 	/* Save the kernel gp as well */
-	UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
+	UASM_i_SW(&p, GPR_GP, offsetof(struct kvm_vcpu_arch, host_gp), GPR_K1);
 
 	/*
 	 * Setup status register for running the guest in UM, interrupts
 	 * are disabled
 	 */
-	UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
-	uasm_i_mtc0(&p, K0, C0_STATUS);
+	UASM_i_LA(&p, GPR_K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
+	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
 	uasm_i_ehb(&p);
 
 	/* load up the new EBASE */
-	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
-	build_set_exc_base(&p, K0);
+	UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, guest_ebase), GPR_K1);
+	build_set_exc_base(&p, GPR_K0);
 
 	/*
 	 * Now that the new EBASE has been loaded, unset BEV, set
 	 * interrupt mask as it was but make sure that timer interrupts
 	 * are enabled
 	 */
-	uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
-	uasm_i_andi(&p, V0, V0, ST0_IM);
-	uasm_i_or(&p, K0, K0, V0);
-	uasm_i_mtc0(&p, K0, C0_STATUS);
+	uasm_i_addiu(&p, GPR_K0, GPR_ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
+	uasm_i_andi(&p, GPR_V0, GPR_V0, ST0_IM);
+	uasm_i_or(&p, GPR_K0, GPR_K0, GPR_V0);
+	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
 	uasm_i_ehb(&p);
 
 	p = kvm_mips_build_enter_guest(p);
@@ -296,15 +243,15 @@ static void *kvm_mips_build_enter_guest(void *addr)
 	memset(relocs, 0, sizeof(relocs));
 
 	/* Set Guest EPC */
-	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
-	UASM_i_MTC0(&p, T0, C0_EPC);
+	UASM_i_LW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, pc), GPR_K1);
+	UASM_i_MTC0(&p, GPR_T0, C0_EPC);
 
 	/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
 	if (cpu_has_ldpte)
-		UASM_i_MFC0(&p, K0, C0_PWBASE);
+		UASM_i_MFC0(&p, GPR_K0, C0_PWBASE);
 	else
-		UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
-	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);
+		UASM_i_MFC0(&p, GPR_K0, c0_kscratch(), pgd_reg);
+	UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_pgd), GPR_K1);
 
 	/*
 	 * Set up KVM GPA pgd.
@@ -312,24 +259,24 @@ static void *kvm_mips_build_enter_guest(void *addr)
 	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
 	 * - write mm->pgd into CP0_PWBase
 	 *
-	 * We keep S0 pointing at struct kvm so we can load the ASID below.
+	 * We keep GPR_S0 pointing at struct kvm so we can load the ASID below.
 	 */
-	UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
-			  (int)offsetof(struct kvm_vcpu, arch), K1);
-	UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
-	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
-	uasm_i_jalr(&p, RA, T9);
+	UASM_i_LW(&p, GPR_S0, (int)offsetof(struct kvm_vcpu, kvm) -
+			  (int)offsetof(struct kvm_vcpu, arch), GPR_K1);
+	UASM_i_LW(&p, GPR_A0, offsetof(struct kvm, arch.gpa_mm.pgd), GPR_S0);
+	UASM_i_LA(&p, GPR_T9, (unsigned long)tlbmiss_handler_setup_pgd);
+	uasm_i_jalr(&p, GPR_RA, GPR_T9);
 	/* delay slot */
 	if (cpu_has_htw)
-		UASM_i_MTC0(&p, A0, C0_PWBASE);
+		UASM_i_MTC0(&p, GPR_A0, C0_PWBASE);
 	else
 		uasm_i_nop(&p);
 
 	/* Set GM bit to setup eret to VZ guest context */
-	uasm_i_addiu(&p, V1, ZERO, 1);
-	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
-	uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1);
-	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
+	uasm_i_addiu(&p, GPR_V1, GPR_ZERO, 1);
+	uasm_i_mfc0(&p, GPR_K0, C0_GUESTCTL0);
+	uasm_i_ins(&p, GPR_K0, GPR_V1, MIPS_GCTL0_GM_SHIFT, 1);
+	uasm_i_mtc0(&p, GPR_K0, C0_GUESTCTL0);
 
 	if (cpu_has_guestid) {
 		/*
@@ -338,13 +285,13 @@ static void *kvm_mips_build_enter_guest(void *addr)
 		 */
 
 		/* Get current GuestID */
-		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
+		uasm_i_mfc0(&p, GPR_T0, C0_GUESTCTL1);
 		/* Set GuestCtl1.RID = GuestCtl1.ID */
-		uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
+		uasm_i_ext(&p, GPR_T1, GPR_T0, MIPS_GCTL1_ID_SHIFT,
 			   MIPS_GCTL1_ID_WIDTH);
-		uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
+		uasm_i_ins(&p, GPR_T0, GPR_T1, MIPS_GCTL1_RID_SHIFT,
 			   MIPS_GCTL1_RID_WIDTH);
-		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
+		uasm_i_mtc0(&p, GPR_T0, C0_GUESTCTL1);
 
 		/* GuestID handles dealiasing so we don't need to touch ASID */
 		goto skip_asid_restore;
@@ -353,65 +300,65 @@ static void *kvm_mips_build_enter_guest(void *addr)
 		/* Root ASID Dealias (RAD) */
 
 		/* Save host ASID */
-		UASM_i_MFC0(&p, K0, C0_ENTRYHI);
-		UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
-			  K1);
+		UASM_i_MFC0(&p, GPR_K0, C0_ENTRYHI);
+		UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
+			  GPR_K1);
 
 		/* Set the root ASID for the Guest */
-		UASM_i_ADDIU(&p, T1, S0,
+		UASM_i_ADDIU(&p, GPR_T1, GPR_S0,
 			     offsetof(struct kvm, arch.gpa_mm.context.asid));
 
 	/* t1: contains the base of the ASID array, need to get the cpu id */
 	/* smp_processor_id */
-	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
+	uasm_i_lw(&p, GPR_T2, offsetof(struct thread_info, cpu), GPR_GP);
 	/* index the ASID array */
-	uasm_i_sll(&p, T2, T2, ilog2(sizeof(long)));
-	UASM_i_ADDU(&p, T3, T1, T2);
-	UASM_i_LW(&p, K0, 0, T3);
+	uasm_i_sll(&p, GPR_T2, GPR_T2, ilog2(sizeof(long)));
+	UASM_i_ADDU(&p, GPR_T3, GPR_T1, GPR_T2);
+	UASM_i_LW(&p, GPR_K0, 0, GPR_T3);
 #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
 	/*
 	 * reuse ASID array offset
 	 * cpuinfo_mips is a multiple of sizeof(long)
 	 */
-	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
-	uasm_i_mul(&p, T2, T2, T3);
+	uasm_i_addiu(&p, GPR_T3, GPR_ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
+	uasm_i_mul(&p, GPR_T2, GPR_T2, GPR_T3);
 
-	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
-	UASM_i_ADDU(&p, AT, AT, T2);
-	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
-	uasm_i_and(&p, K0, K0, T2);
+	UASM_i_LA_mostly(&p, GPR_AT, (long)&cpu_data[0].asid_mask);
+	UASM_i_ADDU(&p, GPR_AT, GPR_AT, GPR_T2);
+	UASM_i_LW(&p, GPR_T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), GPR_AT);
+	uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T2);
 #else
-	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
+	uasm_i_andi(&p, GPR_K0, GPR_K0, MIPS_ENTRYHI_ASID);
 #endif
 
 	/* Set up KVM VZ root ASID (!guestid) */
-	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
+	uasm_i_mtc0(&p, GPR_K0, C0_ENTRYHI);
 skip_asid_restore:
 	uasm_i_ehb(&p);
 
 	/* Disable RDHWR access */
-	uasm_i_mtc0(&p, ZERO, C0_HWRENA);
+	uasm_i_mtc0(&p, GPR_ZERO, C0_HWRENA);
 
 	/* load the guest context from VCPU and return */
 	for (i = 1; i < 32; ++i) {
 		/* Guest k0/k1 loaded later */
-		if (i == K0 || i == K1)
+		if (i == GPR_K0 || i == GPR_K1)
 			continue;
-		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
+		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), GPR_K1);
 	}
 
 #ifndef CONFIG_CPU_MIPSR6
 	/* Restore hi/lo */
-	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
-	uasm_i_mthi(&p, K0);
+	UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, hi), GPR_K1);
+	uasm_i_mthi(&p, GPR_K0);
 
-	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
-	uasm_i_mtlo(&p, K0);
+	UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, lo), GPR_K1);
+	uasm_i_mtlo(&p, GPR_K0);
 #endif
 
 	/* Restore the guest's k0/k1 registers */
-	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
-	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
+	UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, gprs[GPR_K0]), GPR_K1);
+	UASM_i_LW(&p, GPR_K1, offsetof(struct kvm_vcpu_arch, gprs[GPR_K1]), GPR_K1);
 
 	/* Jump to guest */
 	uasm_i_eret(&p);
@@ -444,13 +391,13 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
 	memset(relocs, 0, sizeof(relocs));
 
 	/* Save guest k1 into scratch register */
-	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
+	UASM_i_MTC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]);
 
 	/* Get the VCPU pointer from the VCPU scratch register */
-	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
+	UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]);
 
 	/* Save guest k0 into VCPU structure */
-	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
+	UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu, arch.gprs[GPR_K0]), GPR_K1);
 
 	/*
 	 * Some of the common tlbex code uses current_cpu_type(). For KVM we
@@ -459,13 +406,13 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
 	preempt_disable();
 
 #ifdef CONFIG_CPU_LOONGSON64
-	UASM_i_MFC0(&p, K1, C0_PGD);
-	uasm_i_lddir(&p, K0, K1, 3);  /* global page dir */
+	UASM_i_MFC0(&p, GPR_K1, C0_PGD);
+	uasm_i_lddir(&p, GPR_K0, GPR_K1, 3);  /* global page dir */
 #ifndef __PAGETABLE_PMD_FOLDED
-	uasm_i_lddir(&p, K1, K0, 1);  /* middle page dir */
+	uasm_i_lddir(&p, GPR_K1, GPR_K0, 1);  /* middle page dir */
 #endif
-	uasm_i_ldpte(&p, K1, 0);      /* even */
-	uasm_i_ldpte(&p, K1, 1);      /* odd */
+	uasm_i_ldpte(&p, GPR_K1, 0);      /* even */
+	uasm_i_ldpte(&p, GPR_K1, 1);      /* odd */
 	uasm_i_tlbwr(&p);
 #else
 	/*
@@ -480,27 +427,27 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
 	 */
 #ifdef CONFIG_64BIT
-	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
+	build_get_pmde64(&p, &l, &r, GPR_K0, GPR_K1); /* get pmd in GPR_K1 */
 #else
-	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
+	build_get_pgde32(&p, GPR_K0, GPR_K1); /* get pgd in GPR_K1 */
 #endif
 	/* we don't support huge pages yet */
 
-	build_get_ptep(&p, K0, K1);
-	build_update_entries(&p, K0, K1);
+	build_get_ptep(&p, GPR_K0, GPR_K1);
+	build_update_entries(&p, GPR_K0, GPR_K1);
 	build_tlb_write_entry(&p, &l, &r, tlb_random);
 #endif
 
 	preempt_enable();
 
 	/* Get the VCPU pointer from the VCPU scratch register again */
-	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
+	UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]);
 
 	/* Restore the guest's k0/k1 registers */
-	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
+	UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu, arch.gprs[GPR_K0]), GPR_K1);
 	uasm_i_ehb(&p);
-	UASM_i_MFC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
+	UASM_i_MFC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]);
 
 	/* Jump to guest */
 	uasm_i_eret(&p);
@@ -530,14 +477,14 @@ void *kvm_mips_build_exception(void *addr, void *handler)
 	memset(relocs, 0, sizeof(relocs));
 
 	/* Save guest k1 into scratch register */
-	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
+	UASM_i_MTC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]);
 
 	/* Get the VCPU pointer from the VCPU scratch register */
-	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
-	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
+	UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]);
+	UASM_i_ADDIU(&p, GPR_K1, GPR_K1, offsetof(struct kvm_vcpu, arch));
 
 	/* Save guest k0 into VCPU structure */
-	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
+	UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, gprs[GPR_K0]), GPR_K1);
 
 	/* Branch to the common handler */
 	uasm_il_b(&p, &r, label_exit_common);
@@ -585,85 +532,85 @@ void *kvm_mips_build_exit(void *addr)
 	/* Start saving Guest context to VCPU */
 	for (i = 0; i < 32; ++i) {
 		/* Guest k0/k1 saved later */
-		if (i == K0 || i == K1)
+		if (i == GPR_K0 || i == GPR_K1)
 			continue;
-		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
+		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), GPR_K1);
 	}
 
 #ifndef CONFIG_CPU_MIPSR6
 	/* We need to save hi/lo and restore them on the way out */
-	uasm_i_mfhi(&p, T0);
-	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);
+	uasm_i_mfhi(&p, GPR_T0);
+	UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, hi), GPR_K1);
 
-	uasm_i_mflo(&p, T0);
-	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
+	uasm_i_mflo(&p, GPR_T0);
+	UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, lo), GPR_K1);
 #endif
 
 	/* Finally save guest k1 to VCPU */
 	uasm_i_ehb(&p);
-	UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
-	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
+	UASM_i_MFC0(&p, GPR_T0, scratch_tmp[0], scratch_tmp[1]);
+	UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, gprs[GPR_K1]), GPR_K1);
 
 	/* Now that context has been saved, we can use other registers */
 
 	/* Restore vcpu */
-	UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
+	UASM_i_MFC0(&p, GPR_S0, scratch_vcpu[0], scratch_vcpu[1]);
 
 	/*
 	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
 	 * the exception
 	 */
-	UASM_i_MFC0(&p, K0, C0_EPC);
-	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);
+	UASM_i_MFC0(&p, GPR_K0, C0_EPC);
+	UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, pc), GPR_K1);
 
-	UASM_i_MFC0(&p, K0, C0_BADVADDR);
-	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
-		  K1);
+	UASM_i_MFC0(&p, GPR_K0, C0_BADVADDR);
+	UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
+		  GPR_K1);
 
-	uasm_i_mfc0(&p, K0, C0_CAUSE);
-	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);
+	uasm_i_mfc0(&p, GPR_K0, C0_CAUSE);
+	uasm_i_sw(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), GPR_K1);
 
 	if (cpu_has_badinstr) {
-		uasm_i_mfc0(&p, K0, C0_BADINSTR);
-		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
-					   host_cp0_badinstr), K1);
+		uasm_i_mfc0(&p, GPR_K0, C0_BADINSTR);
+		uasm_i_sw(&p, GPR_K0, offsetof(struct kvm_vcpu_arch,
+					   host_cp0_badinstr), GPR_K1);
 	}
 
 	if (cpu_has_badinstrp) {
-		uasm_i_mfc0(&p, K0, C0_BADINSTRP);
-		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
-					   host_cp0_badinstrp), K1);
+		uasm_i_mfc0(&p, GPR_K0, C0_BADINSTRP);
+		uasm_i_sw(&p, GPR_K0, offsetof(struct kvm_vcpu_arch,
+					   host_cp0_badinstrp), GPR_K1);
 	}
 
 	/* Now restore the host state just enough to run the handlers */
 
 	/* Switch EBASE to the one used by Linux */
 	/* load up the host EBASE */
-	uasm_i_mfc0(&p, V0, C0_STATUS);
+	uasm_i_mfc0(&p, GPR_V0, C0_STATUS);
 
-	uasm_i_lui(&p, AT, ST0_BEV >> 16);
-	uasm_i_or(&p, K0, V0, AT);
+	uasm_i_lui(&p, GPR_AT, ST0_BEV >> 16);
+	uasm_i_or(&p, GPR_K0, GPR_V0, GPR_AT);
 
-	uasm_i_mtc0(&p, K0, C0_STATUS);
+	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
 	uasm_i_ehb(&p);
 
-	UASM_i_LA_mostly(&p, K0, (long)&ebase);
-	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
-	build_set_exc_base(&p, K0);
+	UASM_i_LA_mostly(&p, GPR_K0, (long)&ebase);
+	UASM_i_LW(&p, GPR_K0, uasm_rel_lo((long)&ebase), GPR_K0);
+	build_set_exc_base(&p, GPR_K0);
 
 	if (raw_cpu_has_fpu) {
 		/*
 		 * If FPU is enabled, save FCR31 and clear it so that later
 		 * ctc1's don't trigger FPE for pending exceptions.
 		 */
-		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
-		uasm_i_and(&p, V1, V0, AT);
-		uasm_il_beqz(&p, &r, V1, label_fpu_1);
+		uasm_i_lui(&p, GPR_AT, ST0_CU1 >> 16);
+		uasm_i_and(&p, GPR_V1, GPR_V0, GPR_AT);
+		uasm_il_beqz(&p, &r, GPR_V1, label_fpu_1);
 		 uasm_i_nop(&p);
-		uasm_i_cfc1(&p, T0, 31);
-		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
-			  K1);
-		uasm_i_ctc1(&p, ZERO, 31);
+		uasm_i_cfc1(&p, GPR_T0, 31);
+		uasm_i_sw(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
+			  GPR_K1);
+		uasm_i_ctc1(&p, GPR_ZERO, 31);
 		uasm_l_fpu_1(&l, p);
 	}
 
@@ -672,22 +619,22 @@ void *kvm_mips_build_exit(void *addr)
 		 * If MSA is enabled, save MSACSR and clear it so that later
 		 * instructions don't trigger MSAFPE for pending exceptions.
 		 */
-		uasm_i_mfc0(&p, T0, C0_CONFIG5);
-		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
-		uasm_il_beqz(&p, &r, T0, label_msa_1);
+		uasm_i_mfc0(&p, GPR_T0, C0_CONFIG5);
+		uasm_i_ext(&p, GPR_T0, GPR_T0, 27, 1); /* MIPS_CONF5_MSAEN */
+		uasm_il_beqz(&p, &r, GPR_T0, label_msa_1);
 		uasm_i_nop(&p);
-		uasm_i_cfcmsa(&p, T0, MSA_CSR);
-		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
-			  K1);
-		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
+		uasm_i_cfcmsa(&p, GPR_T0, MSA_CSR);
+		uasm_i_sw(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
+			  GPR_K1);
+		uasm_i_ctcmsa(&p, MSA_CSR, GPR_ZERO);
 		uasm_l_msa_1(&l, p);
 	}
 
 	/* Restore host ASID */
 	if (!cpu_has_guestid) {
-		UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
-			  K1);
-		UASM_i_MTC0(&p, K0, C0_ENTRYHI);
+		UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
+			  GPR_K1);
+		UASM_i_MTC0(&p, GPR_K0, C0_ENTRYHI);
 	}
 
 	/*
@@ -696,56 +643,56 @@ void *kvm_mips_build_exit(void *addr)
 	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
 	 * - write mm->pgd into CP0_PWBase
 	 */
-	UASM_i_LW(&p, A0,
-		  offsetof(struct kvm_vcpu_arch, host_pgd), K1);
-	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
-	uasm_i_jalr(&p, RA, T9);
+	UASM_i_LW(&p, GPR_A0,
+		  offsetof(struct kvm_vcpu_arch, host_pgd), GPR_K1);
+	UASM_i_LA(&p, GPR_T9, (unsigned long)tlbmiss_handler_setup_pgd);
+	uasm_i_jalr(&p, GPR_RA, GPR_T9);
 	/* delay slot */
 	if (cpu_has_htw)
-		UASM_i_MTC0(&p, A0, C0_PWBASE);
+		UASM_i_MTC0(&p, GPR_A0, C0_PWBASE);
 	else
 		uasm_i_nop(&p);
 
 	/* Clear GM bit so we don't enter guest mode when EXL is cleared */
-	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
-	uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1);
-	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
+	uasm_i_mfc0(&p, GPR_K0, C0_GUESTCTL0);
+	uasm_i_ins(&p, GPR_K0, GPR_ZERO, MIPS_GCTL0_GM_SHIFT, 1);
+	uasm_i_mtc0(&p, GPR_K0, C0_GUESTCTL0);
 
 	/* Save GuestCtl0 so we can access GExcCode after CPU migration */
-	uasm_i_sw(&p, K0,
-		  offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1);
+	uasm_i_sw(&p, GPR_K0,
+		  offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), GPR_K1);
 
 	if (cpu_has_guestid) {
 		/*
 		 * Clear root mode GuestID, so that root TLB operations use the
 		 * root GuestID in the root TLB.
 		 */
-		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
+		uasm_i_mfc0(&p, GPR_T0, C0_GUESTCTL1);
 		/* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
-		uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT,
+		uasm_i_ins(&p, GPR_T0, GPR_ZERO, MIPS_GCTL1_RID_SHIFT,
 			   MIPS_GCTL1_RID_WIDTH);
-		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
+		uasm_i_mtc0(&p, GPR_T0, C0_GUESTCTL1);
 	}
 
 	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
-	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
-	uasm_i_and(&p, V0, V0, AT);
-	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
-	uasm_i_or(&p, V0, V0, AT);
+	uasm_i_addiu(&p, GPR_AT, GPR_ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
+	uasm_i_and(&p, GPR_V0, GPR_V0, GPR_AT);
+	uasm_i_lui(&p, GPR_AT, ST0_CU0 >> 16);
+	uasm_i_or(&p, GPR_V0, GPR_V0, GPR_AT);
 #ifdef CONFIG_64BIT
-	uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
+	uasm_i_ori(&p, GPR_V0, GPR_V0, ST0_SX | ST0_UX);
 #endif
-	uasm_i_mtc0(&p, V0, C0_STATUS);
+	uasm_i_mtc0(&p, GPR_V0, C0_STATUS);
 	uasm_i_ehb(&p);
 
-	/* Load up host GP */
-	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
+	/* Load up host GPR_GP */
+	UASM_i_LW(&p, GPR_GP, offsetof(struct kvm_vcpu_arch, host_gp), GPR_K1);
 
 	/* Need a stack before we can jump to "C" */
-	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
+	UASM_i_LW(&p, GPR_SP, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1);
 
 	/* Saved host state */
-	UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));
+	UASM_i_ADDIU(&p, GPR_SP, GPR_SP, -(int)sizeof(struct pt_regs));
 
 	/*
 	 * XXXKYMA do we need to load the host ASID, maybe not because the
@@ -753,12 +700,12 @@ void *kvm_mips_build_exit(void *addr)
 	 */
 
 	/* Restore host scratch registers, as we'll have clobbered them */
-	kvm_mips_build_restore_scratch(&p, K0, SP);
+	kvm_mips_build_restore_scratch(&p, GPR_K0, GPR_SP);
 
 	/* Restore RDHWR access */
-	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
-	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
-	uasm_i_mtc0(&p, K0, C0_HWRENA);
+	UASM_i_LA_mostly(&p, GPR_K0, (long)&hwrena);
+	uasm_i_lw(&p, GPR_K0, uasm_rel_lo((long)&hwrena), GPR_K0);
+	uasm_i_mtc0(&p, GPR_K0, C0_HWRENA);
 
 	/* Jump to handler */
 	/*
@@ -766,10 +713,10 @@ void *kvm_mips_build_exit(void *addr)
 	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
 	 * with this in the kernel
 	 */
-	uasm_i_move(&p, A0, S0);
-	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
-	uasm_i_jalr(&p, RA, T9);
-	UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
+	uasm_i_move(&p, GPR_A0, GPR_S0);
+	UASM_i_LA(&p, GPR_T9, (unsigned long)kvm_mips_handle_exit);
+	uasm_i_jalr(&p, GPR_RA, GPR_T9);
+	UASM_i_ADDIU(&p, GPR_SP, GPR_SP, -CALLFRAME_SIZ);
 
 	uasm_resolve_relocs(relocs, labels);
 
@@ -799,7 +746,7 @@ static void *kvm_mips_build_ret_from_exit(void *addr)
 	memset(relocs, 0, sizeof(relocs));
 
 	/* Return from handler Make sure interrupts are disabled */
-	uasm_i_di(&p, ZERO);
+	uasm_i_di(&p, GPR_ZERO);
 	uasm_i_ehb(&p);
 
 	/*
@@ -808,15 +755,15 @@ static void *kvm_mips_build_ret_from_exit(void *addr)
 	 * guest, reload k1
 	 */
 
-	uasm_i_move(&p, K1, S0);
-	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
+	uasm_i_move(&p, GPR_K1, GPR_S0);
+	UASM_i_ADDIU(&p, GPR_K1, GPR_K1, offsetof(struct kvm_vcpu, arch));
 
 	/*
 	 * Check return value, should tell us if we are returning to the
 	 * host (handle I/O etc)or resuming the guest
 	 */
-	uasm_i_andi(&p, T0, V0, RESUME_HOST);
-	uasm_il_bnez(&p, &r, T0, label_return_to_host);
+	uasm_i_andi(&p, GPR_T0, GPR_V0, RESUME_HOST);
+	uasm_il_bnez(&p, &r, GPR_T0, label_return_to_host);
 	uasm_i_nop(&p);
 
 	p = kvm_mips_build_ret_to_guest(p);
@@ -843,24 +790,24 @@ static void *kvm_mips_build_ret_to_guest(void *addr)
 	u32 *p = addr;
 
 	/* Put the saved pointer to vcpu (s0) back into the scratch register */
-	UASM_i_MTC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
+	UASM_i_MTC0(&p, GPR_S0, scratch_vcpu[0], scratch_vcpu[1]);
 
 	/* Load up the Guest EBASE to minimize the window where BEV is set */
-	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
+	UASM_i_LW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, guest_ebase), GPR_K1);
 
 	/* Switch EBASE back to the one used by KVM */
-	uasm_i_mfc0(&p, V1, C0_STATUS);
-	uasm_i_lui(&p, AT, ST0_BEV >> 16);
-	uasm_i_or(&p, K0, V1, AT);
-	uasm_i_mtc0(&p, K0, C0_STATUS);
+	uasm_i_mfc0(&p, GPR_V1, C0_STATUS);
+	uasm_i_lui(&p, GPR_AT, ST0_BEV >> 16);
+	uasm_i_or(&p, GPR_K0, GPR_V1, GPR_AT);
+	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
 	uasm_i_ehb(&p);
-	build_set_exc_base(&p, T0);
+	build_set_exc_base(&p, GPR_T0);
 
 	/* Setup status register for running guest in UM */
-	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
-	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
-	uasm_i_and(&p, V1, V1, AT);
-	uasm_i_mtc0(&p, V1, C0_STATUS);
+	uasm_i_ori(&p, GPR_V1, GPR_V1, ST0_EXL | KSU_USER | ST0_IE);
+	UASM_i_LA(&p, GPR_AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
+	uasm_i_and(&p, GPR_V1, GPR_V1, GPR_AT);
+	uasm_i_mtc0(&p, GPR_V1, C0_STATUS);
 	uasm_i_ehb(&p);
 
 	p = kvm_mips_build_enter_guest(p);
@@ -884,31 +831,31 @@ static void *kvm_mips_build_ret_to_host(void *addr)
 	unsigned int i;
 
 	/* EBASE is already pointing to Linux */
-	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
-	UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));
+	UASM_i_LW(&p, GPR_K1, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1);
+	UASM_i_ADDIU(&p, GPR_K1, GPR_K1, -(int)sizeof(struct pt_regs));
 
 	/*
 	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
 	 * to recover the err code
 	 */
-	uasm_i_sra(&p, K0, V0, 2);
-	uasm_i_move(&p, V0, K0);
+	uasm_i_sra(&p, GPR_K0, GPR_V0, 2);
+	uasm_i_move(&p, GPR_V0, GPR_K0);
 
 	/* Load context saved on the host stack */
 	for (i = 16; i < 31; ++i) {
 		if (i == 24)
 			i = 28;
-		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
+		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), GPR_K1);
 	}
 
 	/* Restore RDHWR access */
-	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
-	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
-	uasm_i_mtc0(&p, K0, C0_HWRENA);
+	UASM_i_LA_mostly(&p, GPR_K0, (long)&hwrena);
+	uasm_i_lw(&p, GPR_K0, uasm_rel_lo((long)&hwrena), GPR_K0);
+	uasm_i_mtc0(&p, GPR_K0, C0_HWRENA);
 
-	/* Restore RA, which is the address we will return to */
-	UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
-	uasm_i_jr(&p, RA);
+	/* Restore GPR_RA, which is the address we will return to */
+	UASM_i_LW(&p, GPR_RA, offsetof(struct pt_regs, regs[GPR_RA]), GPR_K1);
+	uasm_i_jr(&p, GPR_RA);
 	uasm_i_nop(&p);
 
 	return p;
diff --git a/arch/mips/kvm/fpu.S b/arch/mips/kvm/fpu.S
index 16f17c6390dd..eb2e8cc3532f 100644
--- a/arch/mips/kvm/fpu.S
+++ b/arch/mips/kvm/fpu.S
@@ -22,7 +22,7 @@
 
 LEAF(__kvm_save_fpu)
 	.set	push
-	SET_HARDFLOAT
+	.set	hardfloat
 	.set	fp=64
 	mfc0	t0, CP0_STATUS
 	sll     t0, t0, 5			# is Status.FR set?
@@ -66,7 +66,7 @@ LEAF(__kvm_save_fpu)
 
 LEAF(__kvm_restore_fpu)
 	.set	push
-	SET_HARDFLOAT
+	.set	hardfloat
 	.set	fp=64
 	mfc0	t0, CP0_STATUS
 	sll     t0, t0, 5			# is Status.FR set?
@@ -110,7 +110,7 @@ LEAF(__kvm_restore_fpu)
 
 LEAF(__kvm_restore_fcsr)
 	.set	push
-	SET_HARDFLOAT
+	.set	hardfloat
 	lw	t0, VCPU_FCR31(a0)
 	/*
	 * The ctc1 must stay at this offset in __kvm_restore_fcsr.
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
index e529ea2bb34b..07bc0160bc94 100644
--- a/arch/mips/kvm/interrupt.h
+++ b/arch/mips/kvm/interrupt.h
@@ -37,3 +37,7 @@ u32 kvm_irq_to_priority(u32 irq);
 
 int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause);
+
+#ifdef CONFIG_CPU_LOONGSON64
+extern void kvm_init_loongson_ipi(struct kvm *kvm);
+#endif
diff --git a/arch/mips/kvm/loongson_ipi.c b/arch/mips/kvm/loongson_ipi.c
index 5d53f32d837c..6ac83a31148c 100644
--- a/arch/mips/kvm/loongson_ipi.c
+++ b/arch/mips/kvm/loongson_ipi.c
@@ -10,6 +10,8 @@
 
 #include <linux/kvm_host.h>
 
+#include "interrupt.h"
+
 #define IPI_BASE            0x3ff01000ULL
 
 #define CORE0_STATUS_OFF       0x000
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index a25e0b73ee70..a75587018f44 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -125,28 +125,16 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
-int kvm_arch_hardware_enable(void)
+int kvm_arch_enable_virtualization_cpu(void)
 {
-	return kvm_mips_callbacks->hardware_enable();
+	return kvm_mips_callbacks->enable_virtualization_cpu();
 }
 
-void kvm_arch_hardware_disable(void)
+void kvm_arch_disable_virtualization_cpu(void)
 {
-	kvm_mips_callbacks->hardware_disable();
+	kvm_mips_callbacks->disable_virtualization_cpu();
 }
 
-int kvm_arch_hardware_setup(void *opaque)
-{
-	return 0;
-}
-
-int kvm_arch_check_processor_compat(void *opaque)
-{
-	return 0;
-}
-
-extern void kvm_init_loongson_ipi(struct kvm *kvm);
-
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
 	switch (type) {
@@ -209,7 +197,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	/* Flush slot from GPA */
 	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
 			      slot->base_gfn + slot->npages - 1);
-	kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+	kvm_flush_remote_tlbs_memslot(kvm, slot);
 	spin_unlock(&kvm->mmu_lock);
 }
 
@@ -245,7 +233,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 		needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
 					new->base_gfn + new->npages - 1);
 		if (needs_flush)
-			kvm_arch_flush_remote_tlbs_memslot(kvm, new);
+			kvm_flush_remote_tlbs_memslot(kvm, new);
 		spin_unlock(&kvm->mmu_lock);
 	}
 }
@@ -300,9 +288,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	if (err)
 		return err;
 
-	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
-		     HRTIMER_MODE_REL);
-	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
+	hrtimer_setup(&vcpu->arch.comparecount_timer, kvm_mips_comparecount_wakeup, CLOCK_MONOTONIC,
+		      HRTIMER_MODE_REL);
 
 	/*
 	 * Allocate space for host mode exception handlers that handle
@@ -328,7 +315,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	 * we allocate is out of range, just give up now.
 	 */
 	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
-		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
+		kvm_err("CP0_EBase.WG required for guest exception base %p\n",
 			gebase);
 		err = -ENOMEM;
 		goto out_free_gebase;
@@ -446,7 +433,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		vcpu->mmio_needed = 0;
 	}
 
-	if (vcpu->run->immediate_exit)
+	if (!vcpu->wants_to_run)
 		goto out;
 
 	lose_fpu(1);
@@ -659,7 +646,7 @@ static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
 static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 			    const struct kvm_one_reg *reg)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
 	int ret;
 	s64 v;
@@ -771,7 +758,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 			    const struct kvm_one_reg *reg)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
 	s64 v;
 	s64 vs[2];
@@ -991,21 +978,15 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 
 }
 
-int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
 {
 	kvm_mips_callbacks->prepare_flush_shadow(kvm);
 	return 1;
 }
 
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
-					const struct kvm_memory_slot *memslot)
+int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 {
-	kvm_flush_remote_tlbs(kvm);
-}
-
-long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
-{
-	long r;
+	int r;
 
 	switch (ioctl) {
 	default:
@@ -1015,21 +996,6 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 	return r;
 }
 
-int kvm_arch_init(void *opaque)
-{
-	if (kvm_mips_callbacks) {
-		kvm_err("kvm: module already exists\n");
-		return -EEXIST;
-	}
-
-	return kvm_mips_emulation_init(&kvm_mips_callbacks);
-}
-
-void kvm_arch_exit(void)
-{
-	kvm_mips_callbacks = NULL;
-}
-
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
@@ -1111,7 +1077,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
 	return kvm_mips_pending_timer(vcpu) ||
-		kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
+		kvm_read_c0_guest_cause(&vcpu->arch.cop0) & C_TI;
 }
 
 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
@@ -1135,7 +1101,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
 	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
 	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
 
-	cop0 = vcpu->arch.cop0;
+	cop0 = &vcpu->arch.cop0;
 	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
 		  kvm_read_c0_guest_status(cop0),
 		  kvm_read_c0_guest_cause(cop0));
@@ -1257,7 +1223,7 @@ static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
 
 	case EXCCODE_TLBS:
 		kvm_debug("TLB ST fault:  cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
-			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+			  cause, kvm_read_c0_guest_status(&vcpu->arch.cop0), opc,
 			  badvaddr);
 
 		++vcpu->stat.tlbmiss_st_exits;
@@ -1329,7 +1295,7 @@ static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
 		kvm_get_badinstr(opc, vcpu, &inst);
 		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#x\n",
 			exccode, opc, inst, badvaddr,
-			kvm_read_c0_guest_status(vcpu->arch.cop0));
+			kvm_read_c0_guest_status(&vcpu->arch.cop0));
 		kvm_arch_vcpu_dump_regs(vcpu);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
@@ -1402,7 +1368,7 @@ int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
 /* Enable FPU for guest and restore context */
 void kvm_own_fpu(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	unsigned int sr, cfg5;
 
 	preempt_disable();
@@ -1446,7 +1412,7 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
 /* Enable MSA for guest and restore context */
 void kvm_own_msa(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	unsigned int sr, cfg5;
 
 	preempt_disable();
@@ -1646,16 +1612,21 @@ static int __init kvm_mips_init(void)
 	if (ret)
 		return ret;
 
-	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
-
+	ret = kvm_mips_emulation_init();
 	if (ret)
 		return ret;
 
+
 	if (boot_cpu_type() == CPU_LOONGSON64)
 		kvm_priority_to_irq = kvm_loongson3_priority_to_irq;
 
 	register_die_notifier(&kvm_mips_csr_die_notifier);
 
+	ret = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+	if (ret) {
+		unregister_die_notifier(&kvm_mips_csr_die_notifier);
+		return ret;
+	}
+
 	return 0;
 }
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 1bfd1b501d82..d2c3b6b41f18 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -80,7 +80,7 @@ pgd_t *kvm_pgd_alloc(void)
 {
 	pgd_t *ret;
 
-	ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER);
+	ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER);
 	if (ret)
 		kvm_pgd_init(ret);
 
@@ -122,8 +122,7 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
 		if (!cache)
 			return NULL;
 		new_pmd = kvm_mmu_memory_cache_alloc(cache);
-		pmd_init((unsigned long)new_pmd,
-			 (unsigned long)invalid_pte_table);
+		pmd_init(new_pmd);
 		pud_populate(NULL, pud, new_pmd);
 	}
 	pmd = pmd_offset(pud, addr);
@@ -445,36 +444,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 	return true;
 }
 
-bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-{
-	gpa_t gpa = range->start << PAGE_SHIFT;
-	pte_t hva_pte = range->pte;
-	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
-	pte_t old_pte;
-
-	if (!gpa_pte)
-		return false;
-
-	/* Mapping may need adjusting depending on memslot flags */
-	old_pte = *gpa_pte;
-	if (range->slot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
-		hva_pte = pte_mkclean(hva_pte);
-	else if (range->slot->flags & KVM_MEM_READONLY)
-		hva_pte = pte_wrprotect(hva_pte);
-
-	set_pte(gpa_pte, hva_pte);
-
-	/* Replacing an absent or old page doesn't need flushes */
-	if (!pte_present(old_pte) || !pte_young(old_pte))
-		return false;
-
-	/* Pages swapped, aged, moved, or cleaned require flushes */
-	return !pte_present(hva_pte) ||
-	       !pte_young(hva_pte) ||
-	       pte_pfn(old_pte) != pte_pfn(hva_pte) ||
-	       (pte_dirty(old_pte) && !pte_dirty(hva_pte));
-}
-
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
 	return kvm_mips_mkold_gpa_pt(kvm, range->start, range->end);
@@ -515,8 +484,6 @@ static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
 	struct kvm *kvm = vcpu->kvm;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	pte_t *ptep;
-	kvm_pfn_t pfn = 0;	/* silence bogus GCC warning */
-	bool pfn_valid = false;
 	int ret = 0;
 
 	spin_lock(&kvm->mmu_lock);
@@ -529,12 +496,9 @@ static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
 	}
 
 	/* Track access to pages marked old */
-	if (!pte_young(*ptep)) {
+	if (!pte_young(*ptep))
 		set_pte(ptep, pte_mkyoung(*ptep));
-		pfn = pte_pfn(*ptep);
-		pfn_valid = true;
-		/* call kvm_set_pfn_accessed() after unlock */
-	}
+
 	if (write_fault && !pte_dirty(*ptep)) {
 		if (!pte_write(*ptep)) {
 			ret = -EFAULT;
@@ -543,9 +507,7 @@ static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
 
 		/* Track dirtying of writeable pages */
 		set_pte(ptep, pte_mkdirty(*ptep));
-		pfn = pte_pfn(*ptep);
 		mark_page_dirty(kvm, gfn);
-		kvm_set_pfn_dirty(pfn);
 	}
 
 	if (out_entry)
@@ -555,8 +517,6 @@ static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
 
 out:
 	spin_unlock(&kvm->mmu_lock);
-	if (pfn_valid)
-		kvm_set_pfn_accessed(pfn);
 	return ret;
 }
 
@@ -593,10 +553,11 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	int srcu_idx, err;
 	kvm_pfn_t pfn;
-	pte_t *ptep, entry, old_pte;
+	pte_t *ptep, entry;
 	bool writeable;
 	unsigned long prot_bits;
 	unsigned long mmu_seq;
+	struct page *page;
 
 	/* Try the fast path to handle old / clean pages */
 	srcu_idx = srcu_read_lock(&kvm->srcu);
@@ -615,22 +576,22 @@ retry:
 	 * Used to check for invalidations in progress, of the pfn that is
 	 * returned by pfn_to_pfn_prot below.
 	 */
-	mmu_seq = kvm->mmu_notifier_seq;
+	mmu_seq = kvm->mmu_invalidate_seq;
 	/*
-	 * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
-	 * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+	 * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads
+	 * in kvm_faultin_pfn() (which calls get_user_pages()), so that we don't
 	 * risk the page we get a reference to getting unmapped before we have a
-	 * chance to grab the mmu_lock without mmu_notifier_retry() noticing.
+	 * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
 	 *
 	 * This smp_rmb() pairs with the effective smp_wmb() of the combination
 	 * of the pte_unmap_unlock() after the PTE is zapped, and the
 	 * spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
-	 * mmu_notifier_seq is incremented.
+	 * mmu_invalidate_seq is incremented.
 	 */
 	smp_rmb();
 
 	/* Slow path - ask KVM core whether we can access this GPA */
-	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
+	pfn = kvm_faultin_pfn(vcpu, gfn, write_fault, &writeable, &page);
 	if (is_error_noslot_pfn(pfn)) {
 		err = -EFAULT;
 		goto out;
@@ -638,14 +599,14 @@ retry:
 
 	spin_lock(&kvm->mmu_lock);
 	/* Check if an invalidation has taken place since we got pfn */
-	if (mmu_notifier_retry(kvm, mmu_seq)) {
+	if (mmu_invalidate_retry(kvm, mmu_seq)) {
 		/*
 		 * This can happen when mappings are changed asynchronously, but
 		 * also synchronously if a COW is triggered by
-		 * gfn_to_pfn_prot().
+		 * kvm_faultin_pfn().
 		 */
 		spin_unlock(&kvm->mmu_lock);
-		kvm_release_pfn_clean(pfn);
+		kvm_release_page_unused(page);
 		goto retry;
 	}
 
@@ -659,13 +620,11 @@ retry:
 		if (write_fault) {
 			prot_bits |= __WRITEABLE;
 			mark_page_dirty(kvm, gfn);
-			kvm_set_pfn_dirty(pfn);
 		}
 	}
 	entry = pfn_pte(pfn, __pgprot(prot_bits));
 
 	/* Write the PTE */
-	old_pte = *ptep;
 	set_pte(ptep, entry);
 
 	err = 0;
@@ -674,9 +633,8 @@ retry:
 	if (out_buddy)
 		*out_buddy = *ptep_buddy(ptep);
 
+	kvm_release_faultin_page(kvm, page, false, writeable);
 	spin_unlock(&kvm->mmu_lock);
-	kvm_release_pfn_clean(pfn);
-	kvm_set_pfn_accessed(pfn);
 out:
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
 	return err;
diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c
index 53f851a61554..3e6682018fbe 100644
--- a/arch/mips/kvm/stats.c
+++ b/arch/mips/kvm/stats.c
@@ -54,9 +54,9 @@ void kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
 	kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
 	for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
 		for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
-			if (vcpu->arch.cop0->stat[i][j])
+			if (vcpu->arch.cop0.stat[i][j])
 				kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j,
-					 vcpu->arch.cop0->stat[i][j]);
+					 vcpu->arch.cop0.stat[i][j]);
 		}
 	}
 #endif
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
index a8c7fd7bf6d2..136c3535a1cb 100644
--- a/arch/mips/kvm/trace.h
+++ b/arch/mips/kvm/trace.h
@@ -322,11 +322,11 @@ TRACE_EVENT_FN(kvm_guest_mode_change,
 	    ),
 
 	    TP_fast_assign(
-			__entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0);
+			__entry->epc = kvm_read_c0_guest_epc(&vcpu->arch.cop0);
 			__entry->pc = vcpu->arch.pc;
-			__entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0);
-			__entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0);
-			__entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0);
+			__entry->badvaddr = kvm_read_c0_guest_badvaddr(&vcpu->arch.cop0);
+			__entry->status = kvm_read_c0_guest_status(&vcpu->arch.cop0);
+			__entry->cause = kvm_read_c0_guest_cause(&vcpu->arch.cop0);
 	    ),
 
 	    TP_printk("EPC: 0x%08lx PC: 0x%08lx Status: 0x%08x Cause: 0x%08x BadVAddr: 0x%08lx",
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index c706f5890a05..ccab4d76b126 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -422,7 +422,7 @@ static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
  */
 static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	u32 cause, compare;
 
 	compare = kvm_read_sw_gc0_compare(cop0);
@@ -517,7 +517,7 @@ static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
  */
 static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	u32 gctl0, compare, cause;
 
 	gctl0 = read_c0_guestctl0();
@@ -863,7 +863,7 @@ static unsigned long mips_process_maar(unsigned int op, unsigned long val)
 
 static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 
 	val &= MIPS_MAARI_INDEX;
 	if (val == MIPS_MAARI_INDEX)
@@ -876,7 +876,7 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
 					      u32 *opc, u32 cause,
 					      struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	enum emulation_result er = EMULATE_DONE;
 	u32 rt, rd, sel;
 	unsigned long curr_pc;
@@ -1911,7 +1911,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
 			      const struct kvm_one_reg *reg,
 			      s64 *v)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	unsigned int idx;
 
 	switch (reg->id) {
@@ -2081,7 +2081,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
 	case KVM_REG_MIPS_CP0_MAARI:
 		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
 			return -EINVAL;
-		*v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
+		*v = kvm_read_sw_gc0_maari(&vcpu->arch.cop0);
 		break;
 #ifdef CONFIG_64BIT
 	case KVM_REG_MIPS_CP0_XCONTEXT:
@@ -2135,7 +2135,7 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
 			      const struct kvm_one_reg *reg,
 			      s64 v)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	unsigned int idx;
 	int ret = 0;
 	unsigned int cur, change;
@@ -2562,7 +2562,7 @@ static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
 
 static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	bool migrated, all;
 
 	/*
@@ -2704,7 +2704,7 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 
 	if (current->flags & PF_VCPU)
 		kvm_vz_vcpu_save_wired(vcpu);
@@ -2869,7 +2869,7 @@ static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
 	return ret + 1;
 }
 
-static int kvm_vz_hardware_enable(void)
+static int kvm_vz_enable_virtualization_cpu(void)
 {
 	unsigned int mmu_size, guest_mmu_size, ftlb_size;
 	u64 guest_cvmctl, cvmvmconfig;
@@ -2983,7 +2983,7 @@ static int kvm_vz_hardware_enable(void)
 	return 0;
 }
 
-static void kvm_vz_hardware_disable(void)
+static void kvm_vz_disable_virtualization_cpu(void)
 {
 	u64 cvmvmconfig;
 	unsigned int mmu_size;
@@ -3076,7 +3076,7 @@ static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
 
 static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 	unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */
 
 	/*
@@ -3280,8 +3280,8 @@ static struct kvm_mips_callbacks kvm_vz_callbacks = {
 	.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
 	.handle_guest_exit = kvm_trap_vz_handle_guest_exit,
 
-	.hardware_enable = kvm_vz_hardware_enable,
-	.hardware_disable = kvm_vz_hardware_disable,
+	.enable_virtualization_cpu = kvm_vz_enable_virtualization_cpu,
+	.disable_virtualization_cpu = kvm_vz_disable_virtualization_cpu,
 	.check_extension = kvm_vz_check_extension,
 	.vcpu_init = kvm_vz_vcpu_init,
 	.vcpu_uninit = kvm_vz_vcpu_uninit,
@@ -3304,7 +3304,10 @@ static struct kvm_mips_callbacks kvm_vz_callbacks = {
 	.vcpu_reenter = kvm_vz_vcpu_reenter,
 };
 
-int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
+/* FIXME: Get rid of the callbacks now that trap-and-emulate is gone. */
+const struct kvm_mips_callbacks * const kvm_mips_callbacks = &kvm_vz_callbacks;
+
+int kvm_mips_emulation_init(void)
 {
 	if (!cpu_has_vz)
 		return -ENODEV;
@@ -3318,7 +3321,5 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
 		return -ENODEV;
 
 	pr_info("Starting KVM with MIPS VZ extensions\n");
-
-	*install_callbacks = &kvm_vz_callbacks;
 	return 0;
 }