Diffstat (limited to 'arch/arm64/kvm/hyp')
-rw-r--r--  arch/arm64/kvm/hyp/Makefile     |  14
-rw-r--r--  arch/arm64/kvm/hyp/debug-sr.c   | 140
-rw-r--r--  arch/arm64/kvm/hyp/entry.S      | 160
-rw-r--r--  arch/arm64/kvm/hyp/fpsimd.S     |  33
-rw-r--r--  arch/arm64/kvm/hyp/hyp-entry.S  | 212
-rw-r--r--  arch/arm64/kvm/hyp/hyp.h        |  90
-rw-r--r--  arch/arm64/kvm/hyp/switch.c     | 175
-rw-r--r--  arch/arm64/kvm/hyp/sysreg-sr.c  | 138
-rw-r--r--  arch/arm64/kvm/hyp/timer-sr.c   |  71
-rw-r--r--  arch/arm64/kvm/hyp/tlb.c        |  80
-rw-r--r--  arch/arm64/kvm/hyp/vgic-v2-sr.c |  84
-rw-r--r--  arch/arm64/kvm/hyp/vgic-v3-sr.c | 228
12 files changed, 1425 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
new file mode 100644
index 000000000000..826032bc3945
--- /dev/null
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for Kernel-based Virtual Machine module, HYP part
+#
+
+obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += entry.o
+obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
+obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
+obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
new file mode 100644
index 000000000000..c9c1e97501a9
--- /dev/null
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+
+#include "hyp.h"
+
+#define read_debug(r,n) read_sysreg(r##n##_el1)
+#define write_debug(v,r,n) write_sysreg(v, r##n##_el1)
+
+#define save_debug(ptr,reg,nr) \
+ switch (nr) { \
+ case 15: ptr[15] = read_debug(reg, 15); \
+ case 14: ptr[14] = read_debug(reg, 14); \
+ case 13: ptr[13] = read_debug(reg, 13); \
+ case 12: ptr[12] = read_debug(reg, 12); \
+ case 11: ptr[11] = read_debug(reg, 11); \
+ case 10: ptr[10] = read_debug(reg, 10); \
+ case 9: ptr[9] = read_debug(reg, 9); \
+ case 8: ptr[8] = read_debug(reg, 8); \
+ case 7: ptr[7] = read_debug(reg, 7); \
+ case 6: ptr[6] = read_debug(reg, 6); \
+ case 5: ptr[5] = read_debug(reg, 5); \
+ case 4: ptr[4] = read_debug(reg, 4); \
+ case 3: ptr[3] = read_debug(reg, 3); \
+ case 2: ptr[2] = read_debug(reg, 2); \
+ case 1: ptr[1] = read_debug(reg, 1); \
+ default: ptr[0] = read_debug(reg, 0); \
+ }
+
+#define restore_debug(ptr,reg,nr) \
+ switch (nr) { \
+ case 15: write_debug(ptr[15], reg, 15); \
+ case 14: write_debug(ptr[14], reg, 14); \
+ case 13: write_debug(ptr[13], reg, 13); \
+ case 12: write_debug(ptr[12], reg, 12); \
+ case 11: write_debug(ptr[11], reg, 11); \
+ case 10: write_debug(ptr[10], reg, 10); \
+ case 9: write_debug(ptr[9], reg, 9); \
+ case 8: write_debug(ptr[8], reg, 8); \
+ case 7: write_debug(ptr[7], reg, 7); \
+ case 6: write_debug(ptr[6], reg, 6); \
+ case 5: write_debug(ptr[5], reg, 5); \
+ case 4: write_debug(ptr[4], reg, 4); \
+ case 3: write_debug(ptr[3], reg, 3); \
+ case 2: write_debug(ptr[2], reg, 2); \
+ case 1: write_debug(ptr[1], reg, 1); \
+ default: write_debug(ptr[0], reg, 0); \
+ }
+
+void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug_arch *dbg,
+ struct kvm_cpu_context *ctxt)
+{
+ u64 aa64dfr0;
+ int brps, wrps;
+
+ if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
+ return;
+
+ aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
+ brps = (aa64dfr0 >> 12) & 0xf;
+ wrps = (aa64dfr0 >> 20) & 0xf;
+
+ save_debug(dbg->dbg_bcr, dbgbcr, brps);
+ save_debug(dbg->dbg_bvr, dbgbvr, brps);
+ save_debug(dbg->dbg_wcr, dbgwcr, wrps);
+ save_debug(dbg->dbg_wvr, dbgwvr, wrps);
+
+ ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
+}
+
+void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug_arch *dbg,
+ struct kvm_cpu_context *ctxt)
+{
+ u64 aa64dfr0;
+ int brps, wrps;
+
+ if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
+ return;
+
+ aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
+
+ brps = (aa64dfr0 >> 12) & 0xf;
+ wrps = (aa64dfr0 >> 20) & 0xf;
+
+ restore_debug(dbg->dbg_bcr, dbgbcr, brps);
+ restore_debug(dbg->dbg_bvr, dbgbvr, brps);
+ restore_debug(dbg->dbg_wcr, dbgwcr, wrps);
+ restore_debug(dbg->dbg_wvr, dbgwvr, wrps);
+
+ write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
+}
+
+void __hyp_text __debug_cond_save_host_state(struct kvm_vcpu *vcpu)
+{
+ /* If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY is set, perform
+ * a full save/restore cycle. */
+ if ((vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_KDE) ||
+ (vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_MDE))
+ vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+
+ __debug_save_state(vcpu, &vcpu->arch.host_debug_state,
+ kern_hyp_va(vcpu->arch.host_cpu_context));
+}
+
+void __hyp_text __debug_cond_restore_host_state(struct kvm_vcpu *vcpu)
+{
+ __debug_restore_state(vcpu, &vcpu->arch.host_debug_state,
+ kern_hyp_va(vcpu->arch.host_cpu_context));
+
+ if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
+ vcpu->arch.debug_flags &= ~KVM_ARM64_DEBUG_DIRTY;
+}
+
+static u32 __hyp_text __debug_read_mdcr_el2(void)
+{
+ return read_sysreg(mdcr_el2);
+}
+
+__alias(__debug_read_mdcr_el2) u32 __kvm_get_mdcr_el2(void);
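
The save_debug()/restore_debug() unrolling above is worth spelling out: mrs/msr take the register name as an immediate operand, so DBGBVRn_EL1 and friends cannot be indexed at run time, and the BRPs/WRPs fields of ID_AA64DFR0_EL1 (bits [15:12] and [23:20], each "number of register pairs minus one") drive a fall-through switch that saves exactly indices nr..0. A user-space C sketch of the same pattern, not part of the patch, with plain functions standing in for the per-index read_debug() accessors:

    #include <stdio.h>

    static unsigned long read_reg0(void) { return 0xa0; }
    static unsigned long read_reg1(void) { return 0xa1; }
    static unsigned long read_reg2(void) { return 0xa2; }

    /* Save registers nr..0; every case deliberately falls through. */
    static void save_first(unsigned long *ptr, int nr)
    {
            switch (nr) {
            case 2: ptr[2] = read_reg2();
            case 1: ptr[1] = read_reg1();
            default: ptr[0] = read_reg0();
            }
    }

    int main(void)
    {
            unsigned long buf[3] = { 0 };

            save_first(buf, 1);     /* writes buf[1] and buf[0] only */
            printf("%lx %lx %lx\n", buf[0], buf[1], buf[2]);
            return 0;
    }
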
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
new file mode 100644
index 000000000000..fd0fbe9b7e6a
--- /dev/null
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/fpsimdmacros.h>
+#include <asm/kvm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+
+#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
+#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
+
+ .text
+ .pushsection .hyp.text, "ax"
+
+.macro save_callee_saved_regs ctxt
+ stp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
+ stp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
+ stp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
+ stp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
+ stp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
+ stp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)]
+.endm
+
+.macro restore_callee_saved_regs ctxt
+ ldp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
+ ldp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
+ ldp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
+ ldp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
+ ldp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
+ ldp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)]
+.endm
+
+/*
+ * u64 __guest_enter(struct kvm_vcpu *vcpu,
+ * struct kvm_cpu_context *host_ctxt);
+ */
+ENTRY(__guest_enter)
+ // x0: vcpu
+ // x1: host/guest context
+ // x2-x18: clobbered by macros
+
+ // Store the host regs
+ save_callee_saved_regs x1
+
+ // Preserve vcpu & host_ctxt for use at exit time
+ stp x0, x1, [sp, #-16]!
+
+ add x1, x0, #VCPU_CONTEXT
+
+ // Prepare x0-x1 for later restore by pushing them onto the stack
+ ldp x2, x3, [x1, #CPU_XREG_OFFSET(0)]
+ stp x2, x3, [sp, #-16]!
+
+ // x2-x18
+ ldp x2, x3, [x1, #CPU_XREG_OFFSET(2)]
+ ldp x4, x5, [x1, #CPU_XREG_OFFSET(4)]
+ ldp x6, x7, [x1, #CPU_XREG_OFFSET(6)]
+ ldp x8, x9, [x1, #CPU_XREG_OFFSET(8)]
+ ldp x10, x11, [x1, #CPU_XREG_OFFSET(10)]
+ ldp x12, x13, [x1, #CPU_XREG_OFFSET(12)]
+ ldp x14, x15, [x1, #CPU_XREG_OFFSET(14)]
+ ldp x16, x17, [x1, #CPU_XREG_OFFSET(16)]
+ ldr x18, [x1, #CPU_XREG_OFFSET(18)]
+
+ // x19-x29, lr
+ restore_callee_saved_regs x1
+
+ // Last bits of the 64bit state
+ ldp x0, x1, [sp], #16
+
+ // Do not touch any register after this!
+ eret
+ENDPROC(__guest_enter)
+
+ENTRY(__guest_exit)
+ // x0: vcpu
+ // x1: return code
+ // x2-x3: free
+ // x4-x29,lr: vcpu regs
+ // vcpu x0-x3 on the stack
+
+ add x2, x0, #VCPU_CONTEXT
+
+ stp x4, x5, [x2, #CPU_XREG_OFFSET(4)]
+ stp x6, x7, [x2, #CPU_XREG_OFFSET(6)]
+ stp x8, x9, [x2, #CPU_XREG_OFFSET(8)]
+ stp x10, x11, [x2, #CPU_XREG_OFFSET(10)]
+ stp x12, x13, [x2, #CPU_XREG_OFFSET(12)]
+ stp x14, x15, [x2, #CPU_XREG_OFFSET(14)]
+ stp x16, x17, [x2, #CPU_XREG_OFFSET(16)]
+ str x18, [x2, #CPU_XREG_OFFSET(18)]
+
+ ldp x6, x7, [sp], #16 // x2, x3
+ ldp x4, x5, [sp], #16 // x0, x1
+
+ stp x4, x5, [x2, #CPU_XREG_OFFSET(0)]
+ stp x6, x7, [x2, #CPU_XREG_OFFSET(2)]
+
+ save_callee_saved_regs x2
+
+ // Restore vcpu & host_ctxt from the stack
+ // (preserving return code in x1)
+ ldp x0, x2, [sp], #16
+ // Now restore the host regs
+ restore_callee_saved_regs x2
+
+ mov x0, x1
+ ret
+ENDPROC(__guest_exit)
+
+ENTRY(__fpsimd_guest_restore)
+ stp x4, lr, [sp, #-16]!
+
+ mrs x2, cptr_el2
+ bic x2, x2, #CPTR_EL2_TFP
+ msr cptr_el2, x2
+ isb
+
+ mrs x3, tpidr_el2
+
+ ldr x0, [x3, #VCPU_HOST_CONTEXT]
+ kern_hyp_va x0
+ add x0, x0, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
+ bl __fpsimd_save_state
+
+ add x2, x3, #VCPU_CONTEXT
+ add x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
+ bl __fpsimd_restore_state
+
+ // Skip restoring fpexc32 for AArch64 guests
+ mrs x1, hcr_el2
+ tbnz x1, #HCR_RW_SHIFT, 1f
+ ldr x4, [x3, #VCPU_FPEXC32_EL2]
+ msr fpexc32_el2, x4
+1:
+ ldp x4, lr, [sp], #16
+ ldp x2, x3, [sp], #16
+ ldp x0, x1, [sp], #16
+
+ eret
+ENDPROC(__fpsimd_guest_restore)
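
The CPU_XREG_OFFSET() arithmetic in entry.S works because the guest's general-purpose registers sit in a flat array of u64 inside the vcpu context, so the asm-offsets constants resolve CPU_XREG_OFFSET(n) to the byte offset of regs[n]. A simplified sketch of that layout assumption (the structs below are stand-ins for the real arm64 KVM context, not its definition):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Simplified stand-ins for the real arm64 KVM context structures. */
    struct user_pt_regs { uint64_t regs[31], sp, pc, pstate; };
    struct kvm_regs { struct user_pt_regs regs; /* fp_regs, ... */ };
    struct kvm_cpu_context { struct kvm_regs gp_regs; /* sys_regs[], ... */ };

    int main(void)
    {
            /* CPU_XREG_OFFSET(n) == CPU_GP_REGS + CPU_USER_PT_REGS + 8*n */
            printf("x19 is saved at context offset %zu\n",
                   offsetof(struct kvm_cpu_context, gp_regs.regs.regs[19]));
            return 0;
    }
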
diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S
new file mode 100644
index 000000000000..da3f22c7f14a
--- /dev/null
+++ b/arch/arm64/kvm/hyp/fpsimd.S
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/fpsimdmacros.h>
+
+ .text
+ .pushsection .hyp.text, "ax"
+
+ENTRY(__fpsimd_save_state)
+ fpsimd_save x0, 1
+ ret
+ENDPROC(__fpsimd_save_state)
+
+ENTRY(__fpsimd_restore_state)
+ fpsimd_restore x0, 1
+ ret
+ENDPROC(__fpsimd_restore_state)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
new file mode 100644
index 000000000000..93e8d983c0bd
--- /dev/null
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -0,0 +1,212 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/alternative.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/cpufeature.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+
+ .text
+ .pushsection .hyp.text, "ax"
+
+.macro save_x0_to_x3
+ stp x0, x1, [sp, #-16]!
+ stp x2, x3, [sp, #-16]!
+.endm
+
+.macro restore_x0_to_x3
+ ldp x2, x3, [sp], #16
+ ldp x0, x1, [sp], #16
+.endm
+
+el1_sync: // Guest trapped into EL2
+ save_x0_to_x3
+
+ mrs x1, esr_el2
+ lsr x2, x1, #ESR_ELx_EC_SHIFT
+
+ cmp x2, #ESR_ELx_EC_HVC64
+ b.ne el1_trap
+
+ mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest
+ cbnz x3, el1_trap // called HVC
+
+ /* Here, we're pretty sure the host called HVC. */
+ restore_x0_to_x3
+
+ /* Check for __hyp_get_vectors */
+ cbnz x0, 1f
+ mrs x0, vbar_el2
+ b 2f
+
+1: stp lr, xzr, [sp, #-16]!
+
+ /*
+ * Compute the function address in EL2, and shuffle the parameters.
+ */
+ kern_hyp_va x0
+ mov lr, x0
+ mov x0, x1
+ mov x1, x2
+ mov x2, x3
+ blr lr
+
+ ldp lr, xzr, [sp], #16
+2: eret
+
+el1_trap:
+ /*
+ * x1: ESR
+ * x2: ESR_EC
+ */
+
+ /* Guest accessed VFP/SIMD registers, save host, restore Guest */
+ cmp x2, #ESR_ELx_EC_FP_ASIMD
+ b.eq __fpsimd_guest_restore
+
+ cmp x2, #ESR_ELx_EC_DABT_LOW
+ mov x0, #ESR_ELx_EC_IABT_LOW
+ ccmp x2, x0, #4, ne
+ b.ne 1f // Not an abort we care about
+
+ /* This is an abort. Check for permission fault */
+alternative_if_not ARM64_WORKAROUND_834220
+ and x2, x1, #ESR_ELx_FSC_TYPE
+ cmp x2, #FSC_PERM
+ b.ne 1f // Not a permission fault
+alternative_else
+ nop // Use the permission fault path to
+ nop // check for a valid S1 translation,
+ nop // regardless of the ESR value.
+alternative_endif
+
+ /*
+ * Check for Stage-1 page table walk, which is guaranteed
+ * to give a valid HPFAR_EL2.
+ */
+ tbnz x1, #7, 1f // S1PTW is set
+
+ /* Preserve PAR_EL1 */
+ mrs x3, par_el1
+ stp x3, xzr, [sp, #-16]!
+
+ /*
+ * Permission fault, HPFAR_EL2 is invalid.
+ * Resolve the IPA the hard way using the guest VA.
+ * Stage-1 translation already validated the memory access rights.
+ * As such, we can use the EL1 translation regime, and don't have
+ * to distinguish between EL0 and EL1 access.
+ */
+ mrs x2, far_el2
+ at s1e1r, x2
+ isb
+
+ /* Read result */
+ mrs x3, par_el1
+ ldp x0, xzr, [sp], #16 // Restore PAR_EL1 from the stack
+ msr par_el1, x0
+ tbnz x3, #0, 3f // Bail out if we failed the translation
+ ubfx x3, x3, #12, #36 // Extract IPA
+ lsl x3, x3, #4 // and present it like HPFAR
+ b 2f
+
+1: mrs x3, hpfar_el2
+ mrs x2, far_el2
+
+2: mrs x0, tpidr_el2
+ str w1, [x0, #VCPU_ESR_EL2]
+ str x2, [x0, #VCPU_FAR_EL2]
+ str x3, [x0, #VCPU_HPFAR_EL2]
+
+ mov x1, #ARM_EXCEPTION_TRAP
+ b __guest_exit
+
+ /*
+ * Translation failed. Just return to the guest and
+ * let it fault again. Another CPU is probably playing
+ * behind our back.
+ */
+3: restore_x0_to_x3
+
+ eret
+
+el1_irq:
+ save_x0_to_x3
+ mrs x0, tpidr_el2
+ mov x1, #ARM_EXCEPTION_IRQ
+ b __guest_exit
+
+ENTRY(__hyp_do_panic)
+ mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
+ PSR_MODE_EL1h)
+ msr spsr_el2, lr
+ ldr lr, =panic
+ msr elr_el2, lr
+ eret
+ENDPROC(__hyp_do_panic)
+
+.macro invalid_vector label, target = __hyp_panic
+ .align 2
+\label:
+ b \target
+ENDPROC(\label)
+.endm
+
+ /* None of these should ever happen */
+ invalid_vector el2t_sync_invalid
+ invalid_vector el2t_irq_invalid
+ invalid_vector el2t_fiq_invalid
+ invalid_vector el2t_error_invalid
+ invalid_vector el2h_sync_invalid
+ invalid_vector el2h_irq_invalid
+ invalid_vector el2h_fiq_invalid
+ invalid_vector el2h_error_invalid
+ invalid_vector el1_sync_invalid
+ invalid_vector el1_irq_invalid
+ invalid_vector el1_fiq_invalid
+ invalid_vector el1_error_invalid
+
+ .ltorg
+
+ .align 11
+
+ENTRY(__kvm_hyp_vector)
+ ventry el2t_sync_invalid // Synchronous EL2t
+ ventry el2t_irq_invalid // IRQ EL2t
+ ventry el2t_fiq_invalid // FIQ EL2t
+ ventry el2t_error_invalid // Error EL2t
+
+ ventry el2h_sync_invalid // Synchronous EL2h
+ ventry el2h_irq_invalid // IRQ EL2h
+ ventry el2h_fiq_invalid // FIQ EL2h
+ ventry el2h_error_invalid // Error EL2h
+
+ ventry el1_sync // Synchronous 64-bit EL1
+ ventry el1_irq // IRQ 64-bit EL1
+ ventry el1_fiq_invalid // FIQ 64-bit EL1
+ ventry el1_error_invalid // Error 64-bit EL1
+
+ ventry el1_sync // Synchronous 32-bit EL1
+ ventry el1_irq // IRQ 32-bit EL1
+ ventry el1_fiq_invalid // FIQ 32-bit EL1
+ ventry el1_error_invalid // Error 32-bit EL1
+ENDPROC(__kvm_hyp_vector)
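
The permission-fault path above leans on one architectural detail: HPFAR_EL2 is not guaranteed valid for a Stage-2 permission fault unless the fault happened on a Stage-1 page table walk (S1PTW), so the IPA is recomputed with AT S1E1R. PAR_EL1 then holds the fault flag F in bit 0 and, on success, the address in bits [47:12]; the ubfx/lsl pair re-encodes that the way HPFAR_EL2 presents it (address bits [47:12] placed at bits [39:4]). A C rendering of that massaging, offered as a sketch of the bit manipulation only:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirror of the "ubfx x3, x3, #12, #36; lsl x3, x3, #4" sequence. */
    static uint64_t par_to_hpfar(uint64_t par)
    {
            if (par & 1)            /* PAR_EL1.F: the AT walk faulted */
                    return 0;       /* the caller bails out to the guest */
            return ((par >> 12) & ((1ULL << 36) - 1)) << 4;
    }

    int main(void)
    {
            printf("hpfar=%#llx\n",
                   (unsigned long long)par_to_hpfar(0x8badf000ULL));
            return 0;
    }
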
diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
new file mode 100644
index 000000000000..fb275178b6af
--- /dev/null
+++ b/arch/arm64/kvm/hyp/hyp.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_HYP_H__
+#define __ARM64_KVM_HYP_H__
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_mmu.h>
+#include <asm/sysreg.h>
+
+#define __hyp_text __section(.hyp.text) notrace
+
+#define kern_hyp_va(v) (typeof(v))((unsigned long)(v) & HYP_PAGE_OFFSET_MASK)
+#define hyp_kern_va(v) (typeof(v))((unsigned long)(v) - HYP_PAGE_OFFSET \
+ + PAGE_OFFSET)
+
+/**
+ * hyp_alternate_select - Generates patchable code sequences that are
+ * used to switch between two implementations of a function, depending
+ * on the availability of a feature.
+ *
+ * @fname: a symbol name that will be defined as a function returning a
+ * function pointer whose type will match @orig and @alt
+ * @orig: A pointer to the default function, as returned by @fname when
+ * @cond doesn't hold
+ * @alt: A pointer to the alternate function, as returned by @fname
+ * when @cond holds
+ * @cond: a CPU feature (as described in asm/cpufeature.h)
+ */
+#define hyp_alternate_select(fname, orig, alt, cond) \
+typeof(orig) * __hyp_text fname(void) \
+{ \
+ typeof(alt) *val = orig; \
+ asm volatile(ALTERNATIVE("nop \n", \
+ "mov %0, %1 \n", \
+ cond) \
+ : "+r" (val) : "r" (alt)); \
+ return val; \
+}
+
+void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
+
+void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+
+void __timer_save_state(struct kvm_vcpu *vcpu);
+void __timer_restore_state(struct kvm_vcpu *vcpu);
+
+void __sysreg_save_state(struct kvm_cpu_context *ctxt);
+void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
+void __sysreg32_save_state(struct kvm_vcpu *vcpu);
+void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
+
+void __debug_save_state(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug_arch *dbg,
+ struct kvm_cpu_context *ctxt);
+void __debug_restore_state(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug_arch *dbg,
+ struct kvm_cpu_context *ctxt);
+void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
+void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);
+
+void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
+void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
+static inline bool __fpsimd_enabled(void)
+{
+ return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
+}
+
+u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
+void __noreturn __hyp_do_panic(unsigned long, ...);
+
+#endif /* __ARM64_KVM_HYP_H__ */
+
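
hyp_alternate_select() deserves a note: the inline ALTERNATIVE() emits a nop by default, and boot-time patching rewrites it into the mov when @cond is detected, so the selector returns @alt with no run-time branch left in the HYP text (switch.c below uses it to pick the GICv2 or GICv3 save/restore functions). A plain-C model of the resulting behaviour, with an ordinary conditional standing in for the instruction patching:

    #include <stdio.h>

    static void save_v2(int cpu) { printf("v2 save on cpu %d\n", cpu); }
    static void save_v3(int cpu) { printf("v3 save on cpu %d\n", cpu); }

    static int have_sysreg_gic;  /* stand-in for ARM64_HAS_SYSREG_GIC_CPUIF */

    /* Returns a function pointer, like __vgic_call_save_state() below. */
    static void (*vgic_call_save_state(void))(int)
    {
            return have_sysreg_gic ? save_v3 : save_v2;
    }

    int main(void)
    {
            vgic_call_save_state()(0);      /* prints the v2 variant here */
            return 0;
    }
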
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
new file mode 100644
index 000000000000..ca8f5a5e2f96
--- /dev/null
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hyp.h"
+
+static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
+{
+ u64 val;
+
+ /*
+ * We are about to set CPTR_EL2.TFP to trap all floating point
+	 * register accesses to EL2; however, the ARM ARM clearly states that
+	 * traps are only taken to EL2 if the operation would not otherwise
+	 * trap to EL1. Therefore, always make sure that for 32-bit guests
+	 * we set FPEXC.EN to prevent traps to EL1 when setting the TFP bit.
+ */
+ val = vcpu->arch.hcr_el2;
+ if (!(val & HCR_RW)) {
+ write_sysreg(1 << 30, fpexc32_el2);
+ isb();
+ }
+ write_sysreg(val, hcr_el2);
+ /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
+ write_sysreg(1 << 15, hstr_el2);
+ write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2);
+ write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
+}
+
+static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
+{
+ write_sysreg(HCR_RW, hcr_el2);
+ write_sysreg(0, hstr_el2);
+ write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
+ write_sysreg(0, cptr_el2);
+}
+
+static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
+}
+
+static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
+{
+ write_sysreg(0, vttbr_el2);
+}
+
+static hyp_alternate_select(__vgic_call_save_state,
+ __vgic_v2_save_state, __vgic_v3_save_state,
+ ARM64_HAS_SYSREG_GIC_CPUIF);
+
+static hyp_alternate_select(__vgic_call_restore_state,
+ __vgic_v2_restore_state, __vgic_v3_restore_state,
+ ARM64_HAS_SYSREG_GIC_CPUIF);
+
+static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
+{
+ __vgic_call_save_state()(vcpu);
+ write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
+}
+
+static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
+{
+ u64 val;
+
+ val = read_sysreg(hcr_el2);
+ val |= HCR_INT_OVERRIDE;
+ val |= vcpu->arch.irq_lines;
+ write_sysreg(val, hcr_el2);
+
+ __vgic_call_restore_state()(vcpu);
+}
+
+static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpu_context *host_ctxt;
+ struct kvm_cpu_context *guest_ctxt;
+ bool fp_enabled;
+ u64 exit_code;
+
+ vcpu = kern_hyp_va(vcpu);
+ write_sysreg(vcpu, tpidr_el2);
+
+ host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ guest_ctxt = &vcpu->arch.ctxt;
+
+ __sysreg_save_state(host_ctxt);
+ __debug_cond_save_host_state(vcpu);
+
+ __activate_traps(vcpu);
+ __activate_vm(vcpu);
+
+ __vgic_restore_state(vcpu);
+ __timer_restore_state(vcpu);
+
+ /*
+ * We must restore the 32-bit state before the sysregs, thanks
+ * to Cortex-A57 erratum #852523.
+ */
+ __sysreg32_restore_state(vcpu);
+ __sysreg_restore_state(guest_ctxt);
+ __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
+
+ /* Jump in the fire! */
+ exit_code = __guest_enter(vcpu, host_ctxt);
+ /* And we're baaack! */
+
+ fp_enabled = __fpsimd_enabled();
+
+ __sysreg_save_state(guest_ctxt);
+ __sysreg32_save_state(vcpu);
+ __timer_save_state(vcpu);
+ __vgic_save_state(vcpu);
+
+ __deactivate_traps(vcpu);
+ __deactivate_vm(vcpu);
+
+ __sysreg_restore_state(host_ctxt);
+
+ if (fp_enabled) {
+ __fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
+ __fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
+ }
+
+ __debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
+ __debug_cond_restore_host_state(vcpu);
+
+ return exit_code;
+}
+
+__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+
+static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
+
+void __hyp_text __noreturn __hyp_panic(void)
+{
+ unsigned long str_va = (unsigned long)__hyp_panic_string;
+ u64 spsr = read_sysreg(spsr_el2);
+ u64 elr = read_sysreg(elr_el2);
+ u64 par = read_sysreg(par_el1);
+
+ if (read_sysreg(vttbr_el2)) {
+ struct kvm_vcpu *vcpu;
+ struct kvm_cpu_context *host_ctxt;
+
+ vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
+ host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ __deactivate_traps(vcpu);
+ __deactivate_vm(vcpu);
+ __sysreg_restore_state(host_ctxt);
+ }
+
+ /* Call panic for real */
+ __hyp_do_panic(hyp_kern_va(str_va),
+ spsr, elr,
+ read_sysreg(esr_el2), read_sysreg(far_el2),
+ read_sysreg(hpfar_el2), par,
+ (void *)read_sysreg(tpidr_el2));
+
+ unreachable();
+}
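
Note how the lazy FPSIMD handling is split between this file and entry.S: __activate_traps() sets CPTR_EL2.TFP, the first guest FP/SIMD access traps into __fpsimd_guest_restore (which clears TFP and swaps host state for guest state), and __fpsimd_enabled() simply re-reads the bit on exit to learn whether that swap ever happened. A toy model of the flow, with a bool standing in for CPTR_EL2.TFP:

    #include <stdbool.h>
    #include <stdio.h>

    static bool cptr_tfp;           /* stand-in for CPTR_EL2.TFP */

    static void activate_traps(void) { cptr_tfp = true; }

    static void fp_trap(void)       /* first guest FP access traps here */
    {
            cptr_tfp = false;       /* clear the trap, then swap state */
            printf("save host FP, load guest FP\n");
    }

    static bool fpsimd_enabled(void) { return !cptr_tfp; }

    int main(void)
    {
            activate_traps();
            fp_trap();              /* only runs if the guest touches FP */
            if (fpsimd_enabled())   /* guest FP regs are live on exit */
                    printf("save guest FP, restore host FP\n");
            return 0;
    }
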
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
new file mode 100644
index 000000000000..425630980229
--- /dev/null
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2012-2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+
+#include "hyp.h"
+
+/* ctxt is already in the HYP VA space */
+void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+{
+ ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2);
+ ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
+ ctxt->sys_regs[SCTLR_EL1] = read_sysreg(sctlr_el1);
+ ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
+ ctxt->sys_regs[CPACR_EL1] = read_sysreg(cpacr_el1);
+ ctxt->sys_regs[TTBR0_EL1] = read_sysreg(ttbr0_el1);
+ ctxt->sys_regs[TTBR1_EL1] = read_sysreg(ttbr1_el1);
+ ctxt->sys_regs[TCR_EL1] = read_sysreg(tcr_el1);
+ ctxt->sys_regs[ESR_EL1] = read_sysreg(esr_el1);
+ ctxt->sys_regs[AFSR0_EL1] = read_sysreg(afsr0_el1);
+ ctxt->sys_regs[AFSR1_EL1] = read_sysreg(afsr1_el1);
+ ctxt->sys_regs[FAR_EL1] = read_sysreg(far_el1);
+ ctxt->sys_regs[MAIR_EL1] = read_sysreg(mair_el1);
+ ctxt->sys_regs[VBAR_EL1] = read_sysreg(vbar_el1);
+ ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg(contextidr_el1);
+ ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
+ ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
+ ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
+ ctxt->sys_regs[AMAIR_EL1] = read_sysreg(amair_el1);
+ ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg(cntkctl_el1);
+ ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
+ ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
+
+ ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
+ ctxt->gp_regs.regs.pc = read_sysreg(elr_el2);
+ ctxt->gp_regs.regs.pstate = read_sysreg(spsr_el2);
+ ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
+ ctxt->gp_regs.elr_el1 = read_sysreg(elr_el1);
+ ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg(spsr_el1);
+}
+
+void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+{
+ write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
+ write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
+ write_sysreg(ctxt->sys_regs[SCTLR_EL1], sctlr_el1);
+ write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
+ write_sysreg(ctxt->sys_regs[CPACR_EL1], cpacr_el1);
+ write_sysreg(ctxt->sys_regs[TTBR0_EL1], ttbr0_el1);
+ write_sysreg(ctxt->sys_regs[TTBR1_EL1], ttbr1_el1);
+ write_sysreg(ctxt->sys_regs[TCR_EL1], tcr_el1);
+ write_sysreg(ctxt->sys_regs[ESR_EL1], esr_el1);
+ write_sysreg(ctxt->sys_regs[AFSR0_EL1], afsr0_el1);
+ write_sysreg(ctxt->sys_regs[AFSR1_EL1], afsr1_el1);
+ write_sysreg(ctxt->sys_regs[FAR_EL1], far_el1);
+ write_sysreg(ctxt->sys_regs[MAIR_EL1], mair_el1);
+ write_sysreg(ctxt->sys_regs[VBAR_EL1], vbar_el1);
+ write_sysreg(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr_el1);
+ write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
+ write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
+ write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
+ write_sysreg(ctxt->sys_regs[AMAIR_EL1], amair_el1);
+ write_sysreg(ctxt->sys_regs[CNTKCTL_EL1], cntkctl_el1);
+ write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
+ write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
+
+ write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
+ write_sysreg(ctxt->gp_regs.regs.pc, elr_el2);
+ write_sysreg(ctxt->gp_regs.regs.pstate, spsr_el2);
+ write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
+ write_sysreg(ctxt->gp_regs.elr_el1, elr_el1);
+ write_sysreg(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr_el1);
+}
+
+void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
+{
+ u64 *spsr, *sysreg;
+
+ if (read_sysreg(hcr_el2) & HCR_RW)
+ return;
+
+ spsr = vcpu->arch.ctxt.gp_regs.spsr;
+ sysreg = vcpu->arch.ctxt.sys_regs;
+
+ spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
+ spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
+ spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
+ spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);
+
+ sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
+ sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);
+
+ if (__fpsimd_enabled())
+ sysreg[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
+
+ if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
+ sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
+}
+
+void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
+{
+ u64 *spsr, *sysreg;
+
+ if (read_sysreg(hcr_el2) & HCR_RW)
+ return;
+
+ spsr = vcpu->arch.ctxt.gp_regs.spsr;
+ sysreg = vcpu->arch.ctxt.sys_regs;
+
+ write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
+ write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
+ write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
+ write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);
+
+ write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
+ write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);
+
+ if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
+ write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
+}
diff --git a/arch/arm64/kvm/hyp/timer-sr.c b/arch/arm64/kvm/hyp/timer-sr.c
new file mode 100644
index 000000000000..1051e5d7320f
--- /dev/null
+++ b/arch/arm64/kvm/hyp/timer-sr.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2012-2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <clocksource/arm_arch_timer.h>
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_mmu.h>
+
+#include "hyp.h"
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ u64 val;
+
+ if (kvm->arch.timer.enabled) {
+ timer->cntv_ctl = read_sysreg(cntv_ctl_el0);
+ timer->cntv_cval = read_sysreg(cntv_cval_el0);
+ }
+
+ /* Disable the virtual timer */
+ write_sysreg(0, cntv_ctl_el0);
+
+ /* Allow physical timer/counter access for the host */
+ val = read_sysreg(cnthctl_el2);
+ val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+ write_sysreg(val, cnthctl_el2);
+
+ /* Clear cntvoff for the host */
+ write_sysreg(0, cntvoff_el2);
+}
+
+void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ u64 val;
+
+ /*
+	 * Disallow physical timer access for the guest;
+	 * physical counter access is allowed.
+ */
+ val = read_sysreg(cnthctl_el2);
+ val &= ~CNTHCTL_EL1PCEN;
+ val |= CNTHCTL_EL1PCTEN;
+ write_sysreg(val, cnthctl_el2);
+
+ if (kvm->arch.timer.enabled) {
+ write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
+ write_sysreg(timer->cntv_cval, cntv_cval_el0);
+ isb();
+ write_sysreg(timer->cntv_ctl, cntv_ctl_el0);
+ }
+}
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
new file mode 100644
index 000000000000..2a7e0d838698
--- /dev/null
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hyp.h"
+
+static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+{
+ dsb(ishst);
+
+ /* Switch to requested VMID */
+ kvm = kern_hyp_va(kvm);
+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ isb();
+
+ /*
+ * We could do so much better if we had the VA as well.
+ * Instead, we invalidate Stage-2 for this IPA, and the
+ * whole of Stage-1. Weep...
+ */
+ ipa >>= 12;
+ asm volatile("tlbi ipas2e1is, %0" : : "r" (ipa));
+
+ /*
+ * We have to ensure completion of the invalidation at Stage-2,
+ * since a table walk on another CPU could refill a TLB with a
+ * complete (S1 + S2) walk based on the old Stage-2 mapping if
+ * the Stage-1 invalidation happened first.
+ */
+ dsb(ish);
+ asm volatile("tlbi vmalle1is" : : );
+ dsb(ish);
+ isb();
+
+ write_sysreg(0, vttbr_el2);
+}
+
+__alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm,
+ phys_addr_t ipa);
+
+static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
+{
+ dsb(ishst);
+
+ /* Switch to requested VMID */
+ kvm = kern_hyp_va(kvm);
+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ isb();
+
+ asm volatile("tlbi vmalls12e1is" : : );
+ dsb(ish);
+ isb();
+
+ write_sysreg(0, vttbr_el2);
+}
+
+__alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm);
+
+static void __hyp_text __tlb_flush_vm_context(void)
+{
+ dsb(ishst);
+ asm volatile("tlbi alle1is \n"
+ "ic ialluis ": : );
+ dsb(ish);
+}
+
+__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void);
diff --git a/arch/arm64/kvm/hyp/vgic-v2-sr.c b/arch/arm64/kvm/hyp/vgic-v2-sr.c
new file mode 100644
index 000000000000..e71761238cfc
--- /dev/null
+++ b/arch/arm64/kvm/hyp/vgic-v2-sr.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2012-2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/compiler.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_mmu.h>
+
+#include "hyp.h"
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+ struct vgic_dist *vgic = &kvm->arch.vgic;
+ void __iomem *base = kern_hyp_va(vgic->vctrl_base);
+ u32 eisr0, eisr1, elrsr0, elrsr1;
+ int i, nr_lr;
+
+ if (!base)
+ return;
+
+ nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+ cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);
+ cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);
+ eisr0 = readl_relaxed(base + GICH_EISR0);
+ elrsr0 = readl_relaxed(base + GICH_ELRSR0);
+ if (unlikely(nr_lr > 32)) {
+ eisr1 = readl_relaxed(base + GICH_EISR1);
+ elrsr1 = readl_relaxed(base + GICH_ELRSR1);
+ } else {
+ eisr1 = elrsr1 = 0;
+ }
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1;
+ cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
+#else
+ cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0;
+ cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
+#endif
+ cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);
+
+ writel_relaxed(0, base + GICH_HCR);
+
+ for (i = 0; i < nr_lr; i++)
+ cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
+}
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+ struct vgic_dist *vgic = &kvm->arch.vgic;
+ void __iomem *base = kern_hyp_va(vgic->vctrl_base);
+ int i, nr_lr;
+
+ if (!base)
+ return;
+
+ writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
+ writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
+ writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
+
+ nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+ for (i = 0; i < nr_lr; i++)
+ writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4));
+}
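
The CONFIG_CPU_BIG_ENDIAN packing above keeps word 0 of EISR/ELRSR first in memory, so code that later walks the u64 as a pair of 32-bit words sees the same layout on either endianness. A quick self-contained check of that property (a sketch, independent of the kernel headers):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint32_t w0 = 0x11111111, w1 = 0x22222222, first;
            uint64_t packed;

    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            packed = ((uint64_t)w0 << 32) | w1;
    #else
            packed = ((uint64_t)w1 << 32) | w0;
    #endif
            memcpy(&first, &packed, sizeof(first));
            printf("first word: %#x\n", first);     /* w0 either way */
            return 0;
    }
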
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
new file mode 100644
index 000000000000..9142e082f5f3
--- /dev/null
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2012-2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/compiler.h>
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_mmu.h>
+
+#include "hyp.h"
+
+#define vtr_to_max_lr_idx(v) ((v) & 0xf)
+#define vtr_to_nr_pri_bits(v) (((u32)(v) >> 29) + 1)
+
+#define read_gicreg(r) \
+ ({ \
+ u64 reg; \
+ asm volatile("mrs_s %0, " __stringify(r) : "=r" (reg)); \
+ reg; \
+ })
+
+#define write_gicreg(v,r) \
+ do { \
+ u64 __val = (v); \
+ asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\
+ } while (0)
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ u64 val;
+ u32 max_lr_idx, nr_pri_bits;
+
+ /*
+ * Make sure stores to the GIC via the memory mapped interface
+ * are now visible to the system register interface.
+ */
+ dsb(st);
+
+ cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
+ cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2);
+ cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
+ cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);
+
+ write_gicreg(0, ICH_HCR_EL2);
+ val = read_gicreg(ICH_VTR_EL2);
+ max_lr_idx = vtr_to_max_lr_idx(val);
+ nr_pri_bits = vtr_to_nr_pri_bits(val);
+
+ switch (max_lr_idx) {
+ case 15:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)] = read_gicreg(ICH_LR15_EL2);
+ case 14:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)] = read_gicreg(ICH_LR14_EL2);
+ case 13:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)] = read_gicreg(ICH_LR13_EL2);
+ case 12:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)] = read_gicreg(ICH_LR12_EL2);
+ case 11:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)] = read_gicreg(ICH_LR11_EL2);
+ case 10:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)] = read_gicreg(ICH_LR10_EL2);
+ case 9:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)] = read_gicreg(ICH_LR9_EL2);
+ case 8:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)] = read_gicreg(ICH_LR8_EL2);
+ case 7:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)] = read_gicreg(ICH_LR7_EL2);
+ case 6:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)] = read_gicreg(ICH_LR6_EL2);
+ case 5:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)] = read_gicreg(ICH_LR5_EL2);
+ case 4:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)] = read_gicreg(ICH_LR4_EL2);
+ case 3:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)] = read_gicreg(ICH_LR3_EL2);
+ case 2:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)] = read_gicreg(ICH_LR2_EL2);
+ case 1:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)] = read_gicreg(ICH_LR1_EL2);
+ case 0:
+ cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)] = read_gicreg(ICH_LR0_EL2);
+ }
+
+ switch (nr_pri_bits) {
+ case 7:
+ cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
+ cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);
+ case 6:
+ cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2);
+ default:
+ cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
+ }
+
+ switch (nr_pri_bits) {
+ case 7:
+ cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
+ cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);
+ case 6:
+ cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2);
+ default:
+ cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2);
+ }
+
+ val = read_gicreg(ICC_SRE_EL2);
+ write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
+ isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
+ write_gicreg(1, ICC_SRE_EL1);
+}
+
+void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ u64 val;
+ u32 max_lr_idx, nr_pri_bits;
+
+ /*
+ * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
+ * Group0 interrupt (as generated in GICv2 mode) to be
+ * delivered as a FIQ to the guest, with potentially fatal
+ * consequences. So we must make sure that ICC_SRE_EL1 has
+	 * actually been programmed with the value we want before
+ * starting to mess with the rest of the GIC.
+ */
+ write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
+ isb();
+
+ write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
+ write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
+
+ val = read_gicreg(ICH_VTR_EL2);
+ max_lr_idx = vtr_to_max_lr_idx(val);
+ nr_pri_bits = vtr_to_nr_pri_bits(val);
+
+ switch (nr_pri_bits) {
+ case 7:
+ write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
+ write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
+ case 6:
+ write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
+ default:
+ write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
+ }
+
+ switch (nr_pri_bits) {
+ case 7:
+ write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
+ write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
+ case 6:
+ write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2);
+ default:
+ write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
+ }
+
+ switch (max_lr_idx) {
+ case 15:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2);
+ case 14:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)], ICH_LR14_EL2);
+ case 13:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)], ICH_LR13_EL2);
+ case 12:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)], ICH_LR12_EL2);
+ case 11:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)], ICH_LR11_EL2);
+ case 10:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)], ICH_LR10_EL2);
+ case 9:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)], ICH_LR9_EL2);
+ case 8:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)], ICH_LR8_EL2);
+ case 7:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)], ICH_LR7_EL2);
+ case 6:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)], ICH_LR6_EL2);
+ case 5:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)], ICH_LR5_EL2);
+ case 4:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)], ICH_LR4_EL2);
+ case 3:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)], ICH_LR3_EL2);
+ case 2:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)], ICH_LR2_EL2);
+ case 1:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)], ICH_LR1_EL2);
+ case 0:
+ write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)], ICH_LR0_EL2);
+ }
+
+ /*
+ * Ensures that the above will have reached the
+	 * (re)distributors. This ensures the guest will read the
+ * correct values from the memory-mapped interface.
+ */
+ isb();
+ dsb(sy);
+
+ /*
+ * Prevent the guest from touching the GIC system registers if
+ * SRE isn't enabled for GICv3 emulation.
+ */
+ if (!cpu_if->vgic_sre) {
+ write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
+ ICC_SRE_EL2);
+ }
+}
+
+static u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void)
+{
+ return read_gicreg(ICH_VTR_EL2);
+}
+
+__alias(__vgic_v3_read_ich_vtr_el2) u64 __vgic_v3_get_ich_vtr_el2(void);
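
For reference, the two ICH_VTR_EL2 helpers at the top of this file decode "minus one" fields: ListRegs in the low bits gives the last implemented list-register index, and PRIbits at [31:29] gives the number of priority bits minus one, which is what the 7/6/default switches on nr_pri_bits key off (7 bits needs all four APnR registers, 6 needs two, anything less needs one). A worked example using the same macros, with a hypothetical VTR value:

    #include <stdint.h>
    #include <stdio.h>

    #define vtr_to_max_lr_idx(v)    ((v) & 0xf)
    #define vtr_to_nr_pri_bits(v)   (((uint32_t)(v) >> 29) + 1)

    int main(void)
    {
            /* Hypothetical VTR: PRIbits field = 6, ListRegs field = 3 */
            uint64_t vtr = (6ULL << 29) | 3;

            printf("max LR index: %u\n", (unsigned)vtr_to_max_lr_idx(vtr));
            printf("priority bits: %u\n", (unsigned)vtr_to_nr_pri_bits(vtr));
            return 0;
    }
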