Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/debug.c                   21
-rw-r--r--  arch/arm64/kvm/handle_exit.c             32
-rw-r--r--  arch/arm64/kvm/hyp/entry.S                1
-rw-r--r--  arch/arm64/kvm/hyp/hyp-entry.S            4
-rw-r--r--  arch/arm64/kvm/hyp/switch.c              68
-rw-r--r--  arch/arm64/kvm/hyp/tlb.c                 71
-rw-r--r--  arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 12
-rw-r--r--  arch/arm64/kvm/sys_regs.c                20
-rw-r--r--  arch/arm64/kvm/sys_regs.h                 4
-rw-r--r--  arch/arm64/kvm/trace.h                   35
10 files changed, 174 insertions, 94 deletions
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index 00d422336a45..f39801e4136c 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -236,24 +236,3 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
}
}
}
-
-
-/*
- * After successfully emulating an instruction, we might want to
- * return to user space with a KVM_EXIT_DEBUG. We can only do this
- * once the emulation is complete, though, so for userspace emulations
- * we have to wait until we have re-entered KVM before calling this
- * helper.
- *
- * Return true (and set exit_reason) to return to userspace or false
- * if no further action is required.
- */
-bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
- run->exit_reason = KVM_EXIT_DEBUG;
- run->debug.arch.hsr = ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT;
- return true;
- }
- return false;
-}
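
The helper removed above defined the userspace-visible format of a single-step
debug exit. For reference, this is how a VMM consumes such an exit (a sketch,
not part of this patch; handle_singlestep() is a made-up callback):

	#include <linux/kvm.h>

	/*
	 * run->debug.arch.hsr carries the ESR; for a single-step exit it is
	 * ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT, exactly as the removed
	 * helper set it.
	 */
	static void handle_debug_exit(struct kvm_run *run)
	{
		if (run->exit_reason == KVM_EXIT_DEBUG)
			handle_singlestep(run->debug.arch.hsr);
	}
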
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 35a81bebd02b..0b7983442071 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -173,6 +173,23 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
+/*
+ * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
+ * a NOP).
+ */
+static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ /*
+ * We don't currently support ptrauth in a guest, and we mask the ID
+ * registers to prevent well-behaved guests from trying to make use of
+ * it.
+ *
+ * Inject an UNDEF, as if the feature really isn't present.
+ */
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
static exit_handle_fn arm_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
[ESR_ELx_EC_WFx] = kvm_handle_wfx,
@@ -195,6 +212,7 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_BKPT32] = kvm_handle_guest_debug,
[ESR_ELx_EC_BRK64] = kvm_handle_guest_debug,
[ESR_ELx_EC_FP_ASIMD] = handle_no_fpsimd,
+ [ESR_ELx_EC_PAC] = kvm_handle_ptrauth,
};
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
@@ -229,13 +247,6 @@ static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
handled = exit_handler(vcpu, run);
}
- /*
- * kvm_arm_handle_step_debug() sets the exit_reason on the kvm_run
- * structure if we need to return to userspace.
- */
- if (handled > 0 && kvm_arm_handle_step_debug(vcpu, run))
- handled = 0;
-
return handled;
}
@@ -269,12 +280,7 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
case ARM_EXCEPTION_IRQ:
return 1;
case ARM_EXCEPTION_EL1_SERROR:
- /* We may still need to return for single-step */
- if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
- && kvm_arm_handle_step_debug(vcpu, run))
- return 0;
- else
- return 1;
+ return 1;
case ARM_EXCEPTION_TRAP:
return handle_trap_exceptions(vcpu, run);
case ARM_EXCEPTION_HYP_GONE:
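
All entries in arm_exit_handlers[] follow the same convention, which is why
kvm_handle_ptrauth() can simply inject an UNDEF and return 1: a positive
return value re-enters the guest, zero returns to userspace (with
run->exit_reason filled in), and negative values are errors. A minimal sketch
of that shape (example_handler() is hypothetical, not from this patch):

	static int example_handler(struct kvm_vcpu *vcpu, struct kvm_run *run)
	{
		kvm_inject_undefined(vcpu);	/* guest takes an UNDEF */
		return 1;			/* > 0: re-enter the guest */
	}
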
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index fad1e164fe48..675fdc186e3b 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -83,6 +83,7 @@ ENTRY(__guest_enter)
// Do not touch any register after this!
eret
+ sb
ENDPROC(__guest_enter)
ENTRY(__guest_exit)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index b1f14f736962..73c1b483ec39 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -96,6 +96,7 @@ el1_sync: // Guest trapped into EL2
do_el2_call
eret
+ sb
el1_hvc_guest:
/*
@@ -146,6 +147,7 @@ wa_epilogue:
mov x0, xzr
add sp, sp, #16
eret
+ sb
el1_trap:
get_vcpu_ptr x1, x0
@@ -199,6 +201,7 @@ el2_error:
b.ne __hyp_panic
mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
eret
+ sb
ENTRY(__hyp_do_panic)
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
@@ -207,6 +210,7 @@ ENTRY(__hyp_do_panic)
ldr lr, =panic
msr elr_el2, lr
eret
+ sb
ENDPROC(__hyp_do_panic)
ENTRY(__hyp_panic)
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 7cc175c88a37..b0b1478094b4 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -143,6 +143,14 @@ static void deactivate_traps_vhe(void)
{
extern char vectors[]; /* kernel exception vectors */
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+
+ /*
+ * ARM erratum 1165522 requires the actual execution of the above
+ * before we can switch to the EL2/EL0 translation regime used by
+ * the host.
+ */
+ asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));
+
write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
write_sysreg(vectors, vbar_el1);
}
@@ -157,7 +165,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
write_sysreg(mdcr_el2, mdcr_el2);
- write_sysreg(HCR_RW, hcr_el2);
+ write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}
@@ -305,33 +313,6 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
return true;
}
-/* Skip an instruction which has been emulated. Returns true if
- * execution can continue or false if we need to exit hyp mode because
- * single-step was in effect.
- */
-static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
-{
- *vcpu_pc(vcpu) = read_sysreg_el2(elr);
-
- if (vcpu_mode_is_32bit(vcpu)) {
- vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
- kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
- write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
- } else {
- *vcpu_pc(vcpu) += 4;
- }
-
- write_sysreg_el2(*vcpu_pc(vcpu), elr);
-
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
- vcpu->arch.fault.esr_el2 =
- (ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
- return false;
- } else {
- return true;
- }
-}
-
static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu)
{
struct user_fpsimd_state *host_fpsimd = vcpu->arch.host_fpsimd_state;
@@ -420,20 +401,12 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
if (valid) {
int ret = __vgic_v2_perform_cpuif_access(vcpu);
- if (ret == 1 && __skip_instr(vcpu))
+ if (ret == 1)
return true;
- if (ret == -1) {
- /* Promote an illegal access to an
- * SError. If we would be returning
- * due to single-step clear the SS
- * bit so handle_exit knows what to
- * do after dealing with the error.
- */
- if (!__skip_instr(vcpu))
- *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
+ /* Promote an illegal access to an SError. */
+ if (ret == -1)
*exit_code = ARM_EXCEPTION_EL1_SERROR;
- }
goto exit;
}
@@ -444,7 +417,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
int ret = __vgic_v3_perform_cpuif_access(vcpu);
- if (ret == 1 && __skip_instr(vcpu))
+ if (ret == 1)
return true;
}
@@ -499,8 +472,19 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
sysreg_save_host_state_vhe(host_ctxt);
- __activate_traps(vcpu);
+ /*
+ * ARM erratum 1165522 requires us to configure both stage 1 and
+ * stage 2 translation for the guest context before we clear
+ * HCR_EL2.TGE.
+ *
+ * We have already configured the guest's stage 1 translation in
+ * kvm_vcpu_load_sysregs above. We must now call __activate_vm
+ * before __activate_traps, because __activate_vm configures
+ * stage 2 translation, and __activate_traps clears HCR_EL2.TGE
+ * (among other things).
+ */
__activate_vm(vcpu->kvm);
+ __activate_traps(vcpu);
sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);
@@ -545,8 +529,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
__sysreg_save_state_nvhe(host_ctxt);
- __activate_traps(vcpu);
__activate_vm(kern_hyp_va(vcpu->kvm));
+ __activate_traps(vcpu);
__hyp_vgic_restore_state(vcpu);
__timer_enable_traps(vcpu);
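
Condensed, the ordering both run paths now follow is: configure stage 2 first,
then install the traps that clear HCR_EL2.TGE. A sketch of the VHE sequence
above with everything else elided:

	sysreg_save_host_state_vhe(host_ctxt);
	__activate_vm(vcpu->kvm);	/* stage 2 in place first...      */
	__activate_traps(vcpu);		/* ...then HCR_EL2.TGE is cleared */
	sysreg_restore_guest_state_vhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);
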
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 4dbd9c69a96d..76c30866069e 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -15,20 +15,54 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/irqflags.h>
+
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>
-static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+struct tlb_inv_context {
+ unsigned long flags;
+ u64 tcr;
+ u64 sctlr;
+};
+
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
+ struct tlb_inv_context *cxt)
{
u64 val;
+ local_irq_save(cxt->flags);
+
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
+ /*
+ * For CPUs that are affected by ARM erratum 1165522, we
+ * cannot trust stage-1 to be in a correct state at that
+ * point. Since we do not want to force a full load of the
+ * vcpu state, we prevent the EL1 page-table walker from
+ * allocating new TLBs. This is done by setting the EPD bits
+ * in the TCR_EL1 register. We also need to prevent it from
+ * allocating IPA->PA walks, so we enable the S1 MMU...
+ */
+ val = cxt->tcr = read_sysreg_el1(tcr);
+ val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
+ write_sysreg_el1(val, tcr);
+ val = cxt->sctlr = read_sysreg_el1(sctlr);
+ val |= SCTLR_ELx_M;
+ write_sysreg_el1(val, sctlr);
+ }
+
/*
* With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
* most TLB operations target EL2/EL0. In order to affect the
* guest TLBs (EL1/EL0), we need to change one of these two
* bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
* let's flip TGE before executing the TLB operation.
+ *
+ * ARM erratum 1165522 requires some special handling (again),
+ * as we need to make sure both stages of translation are in
+ * place before clearing TGE. __load_guest_stage2() already
+ * has an ISB in order to deal with this.
*/
__load_guest_stage2(kvm);
val = read_sysreg(hcr_el2);
@@ -37,7 +71,8 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
isb();
}
-static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
+ struct tlb_inv_context *cxt)
{
__load_guest_stage2(kvm);
isb();
@@ -48,7 +83,8 @@ static hyp_alternate_select(__tlb_switch_to_guest,
__tlb_switch_to_guest_vhe,
ARM64_HAS_VIRT_HOST_EXTN);
-static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
+ struct tlb_inv_context *cxt)
{
/*
* We're done with the TLB operation, let's restore the host's
@@ -56,9 +92,19 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
*/
write_sysreg(0, vttbr_el2);
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+ isb();
+
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
+ /* Restore the registers to what they were */
+ write_sysreg_el1(cxt->tcr, tcr);
+ write_sysreg_el1(cxt->sctlr, sctlr);
+ }
+
+ local_irq_restore(cxt->flags);
}
-static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
+ struct tlb_inv_context *cxt)
{
write_sysreg(0, vttbr_el2);
}
@@ -70,11 +116,13 @@ static hyp_alternate_select(__tlb_switch_to_host,
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
+ struct tlb_inv_context cxt;
+
dsb(ishst);
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
- __tlb_switch_to_guest()(kvm);
+ __tlb_switch_to_guest()(kvm, &cxt);
/*
* We could do so much better if we had the VA as well.
@@ -117,36 +165,39 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
if (!has_vhe() && icache_is_vpipt())
__flush_icache_all();
- __tlb_switch_to_host()(kvm);
+ __tlb_switch_to_host()(kvm, &cxt);
}
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
{
+ struct tlb_inv_context cxt;
+
dsb(ishst);
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
- __tlb_switch_to_guest()(kvm);
+ __tlb_switch_to_guest()(kvm, &cxt);
__tlbi(vmalls12e1is);
dsb(ish);
isb();
- __tlb_switch_to_host()(kvm);
+ __tlb_switch_to_host()(kvm, &cxt);
}
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+ struct tlb_inv_context cxt;
/* Switch to requested VMID */
- __tlb_switch_to_guest()(kvm);
+ __tlb_switch_to_guest()(kvm, &cxt);
__tlbi(vmalle1);
dsb(nsh);
isb();
- __tlb_switch_to_host()(kvm);
+ __tlb_switch_to_host()(kvm, &cxt);
}
void __hyp_text __kvm_flush_vm_context(void)
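
Seen in isolation, the erratum handling added to the VHE TLB path is a
save/modify/restore of two EL1 registers around the TLB operation (sketch
only, using the same read_sysreg_el1()/write_sysreg_el1() accessors as the
patch):

	/* Entry: stop the EL1 stage-1 walker from allocating new TLB entries. */
	cxt->tcr = read_sysreg_el1(tcr);
	write_sysreg_el1(cxt->tcr | TCR_EPD1_MASK | TCR_EPD0_MASK, tcr);

	/* Keep the stage-1 MMU enabled so no IPA->PA walks are started. */
	cxt->sctlr = read_sysreg_el1(sctlr);
	write_sysreg_el1(cxt->sctlr | SCTLR_ELx_M, sctlr);

	/* ... the TLB maintenance runs here ... */

	/* Exit: put both registers back exactly as they were. */
	write_sysreg_el1(cxt->tcr, tcr);
	write_sysreg_el1(cxt->sctlr, sctlr);
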
diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
index 215c7c0eb3b0..9cbdd034a563 100644
--- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
+++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
@@ -41,7 +41,7 @@ static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
* Returns:
* 1: GICV access successfully performed
* 0: Not a GICV access
- * -1: Illegal GICV access
+ * -1: Illegal GICV access successfully performed
*/
int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
@@ -61,12 +61,16 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
return 0;
/* Reject anything but a 32bit access */
- if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32))
+ if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32)) {
+ __kvm_skip_instr(vcpu);
return -1;
+ }
/* Not aligned? Don't bother */
- if (fault_ipa & 3)
+ if (fault_ipa & 3) {
+ __kvm_skip_instr(vcpu);
return -1;
+ }
rd = kvm_vcpu_dabt_get_rd(vcpu);
addr = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va;
@@ -88,5 +92,7 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
vcpu_set_reg(vcpu, rd, data);
}
+ __kvm_skip_instr(vcpu);
+
return 1;
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 22fbbdbece3c..e3e37228ae4e 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -76,7 +76,7 @@ static bool write_to_read_only(struct kvm_vcpu *vcpu,
return false;
}
-u64 vcpu_read_sys_reg(struct kvm_vcpu *vcpu, int reg)
+u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
if (!vcpu->arch.sysregs_loaded_on_cpu)
goto immediate_read;
@@ -1040,6 +1040,14 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
kvm_debug("SVE unsupported for guests, suppressing\n");
val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
+ } else if (id == SYS_ID_AA64ISAR1_EL1) {
+ const u64 ptrauth_mask = (0xfUL << ID_AA64ISAR1_APA_SHIFT) |
+ (0xfUL << ID_AA64ISAR1_API_SHIFT) |
+ (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
+ (0xfUL << ID_AA64ISAR1_GPI_SHIFT);
+ if (val & ptrauth_mask)
+ kvm_debug("ptrauth unsupported for guests, suppressing\n");
+ val &= ~ptrauth_mask;
} else if (id == SYS_ID_AA64MMFR1_EL1) {
if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
kvm_debug("LORegions unsupported for guests, suppressing\n");
@@ -1850,6 +1858,8 @@ static void perform_access(struct kvm_vcpu *vcpu,
struct sys_reg_params *params,
const struct sys_reg_desc *r)
{
+ trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
+
/*
* Not having an accessor means that we have configured a trap
* that we don't know how to handle. This certainly qualifies
@@ -1912,8 +1922,8 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
WARN_ON(1);
}
- kvm_err("Unsupported guest CP%d access at: %08lx\n",
- cp, *vcpu_pc(vcpu));
+ kvm_err("Unsupported guest CP%d access at: %08lx [%08lx]\n",
+ cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
print_sys_reg_instr(params);
kvm_inject_undefined(vcpu);
}
@@ -2063,8 +2073,8 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
if (likely(r)) {
perform_access(vcpu, params, r);
} else {
- kvm_err("Unsupported guest sys_reg access at: %lx\n",
- *vcpu_pc(vcpu));
+ kvm_err("Unsupported guest sys_reg access at: %lx [%08lx]\n",
+ *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
print_sys_reg_instr(params);
kvm_inject_undefined(vcpu);
}
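
The ID_AA64ISAR1_EL1 mask built in read_id_reg() covers the four 4-bit ptrauth
ID fields; with the architectural field offsets (APA at bit 4, API at bit 8,
GPA at bit 24, GPI at bit 28) it works out to 0xff000ff0. A standalone way to
check that value (illustrative userspace code, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long ptrauth_mask = (0xfUL << 4) |	/* APA */
					     (0xfUL << 8) |	/* API */
					     (0xfUL << 24) |	/* GPA */
					     (0xfUL << 28);	/* GPI */

		printf("ptrauth_mask = %#lx\n", ptrauth_mask);	/* 0xff000ff0 */
		return 0;
	}
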
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index cd710f8b63e0..3b1bc7f01d0b 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -35,6 +35,9 @@ struct sys_reg_params {
};
struct sys_reg_desc {
+ /* Sysreg string for debug */
+ const char *name;
+
/* MRS/MSR instruction which accesses it. */
u8 Op0;
u8 Op1;
@@ -130,6 +133,7 @@ const struct sys_reg_desc *find_reg_by_id(u64 id,
#define Op2(_x) .Op2 = _x
#define SYS_DESC(reg) \
+ .name = #reg, \
Op0(sys_reg_Op0(reg)), Op1(sys_reg_Op1(reg)), \
CRn(sys_reg_CRn(reg)), CRm(sys_reg_CRm(reg)), \
Op2(sys_reg_Op2(reg))
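
With the extra initializer, every table entry written with SYS_DESC() now
records a printable name via the preprocessor stringizer. For example (sketch;
the field values shown are the SCTLR_EL1 encoding, op0=3, op1=0, CRn=1, CRm=0,
op2=0):

	/* SYS_DESC(SYS_SCTLR_EL1) now expands roughly to: */
	.name = "SYS_SCTLR_EL1",
	Op0(3), Op1(0), CRn(1), CRm(0), Op2(0)
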
diff --git a/arch/arm64/kvm/trace.h b/arch/arm64/kvm/trace.h
index 3b82fb1ddd09..eab91ad0effb 100644
--- a/arch/arm64/kvm/trace.h
+++ b/arch/arm64/kvm/trace.h
@@ -3,6 +3,7 @@
#define _TRACE_ARM64_KVM_H
#include <linux/tracepoint.h>
+#include "sys_regs.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
@@ -152,6 +153,40 @@ TRACE_EVENT(kvm_handle_sys_reg,
TP_printk("HSR 0x%08lx", __entry->hsr)
);
+TRACE_EVENT(kvm_sys_access,
+ TP_PROTO(unsigned long vcpu_pc, struct sys_reg_params *params, const struct sys_reg_desc *reg),
+ TP_ARGS(vcpu_pc, params, reg),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, vcpu_pc)
+ __field(bool, is_write)
+ __field(const char *, name)
+ __field(u8, Op0)
+ __field(u8, Op1)
+ __field(u8, CRn)
+ __field(u8, CRm)
+ __field(u8, Op2)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ __entry->is_write = params->is_write;
+ __entry->name = reg->name;
+ __entry->Op0 = reg->Op0;
+ __entry->Op1 = reg->Op1;
+ __entry->CRn = reg->CRn;
+ __entry->CRm = reg->CRm;
+ __entry->Op2 = reg->Op2;
+ ),
+
+ TP_printk("PC: %lx %s (%d,%d,%d,%d,%d) %s",
+ __entry->vcpu_pc, __entry->name ?: "UNKN",
+ __entry->Op0, __entry->Op1, __entry->CRn,
+ __entry->CRm, __entry->Op2,
+ __entry->is_write ? "write" : "read")
+);
+
TRACE_EVENT(kvm_set_guest_debug,
TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
TP_ARGS(vcpu, guest_debug),