Diffstat (limited to 'arch/x86/kvm/trace.h')
-rw-r--r--	arch/x86/kvm/trace.h | 596
1 file changed, 452 insertions(+), 144 deletions(-)
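For illustration (not part of the patch): the first hunk below changes the kvm_entry tracepoint to take the vCPU itself instead of a bare vcpu_id, so the helper generated by TRACE_EVENT() becomes trace_kvm_entry(struct kvm_vcpu *vcpu) and the tracepoint records the guest RIP on its own. A minimal sketch of the resulting call-site change; the function name below is hypothetical:

/* Hypothetical vendor run-loop caller, shown only to illustrate the new signature. */
static void example_vcpu_run(struct kvm_vcpu *vcpu)
{
	/* Before this patch: trace_kvm_entry(vcpu->vcpu_id); */
	trace_kvm_entry(vcpu);	/* tracepoint now pulls vcpu_id and RIP from the vCPU */

	/* ... enter the guest ... */
}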
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index f194dd058470..bc25589ad588 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -15,18 +15,20 @@ * Tracepoint for guest mode entry. */ TRACE_EVENT(kvm_entry, - TP_PROTO(unsigned int vcpu_id), - TP_ARGS(vcpu_id), + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu), TP_STRUCT__entry( __field( unsigned int, vcpu_id ) + __field( unsigned long, rip ) ), TP_fast_assign( - __entry->vcpu_id = vcpu_id; + __entry->vcpu_id = vcpu->vcpu_id; + __entry->rip = kvm_rip_read(vcpu); ), - TP_printk("vcpu %u", __entry->vcpu_id) + TP_printk("vcpu %u, rip 0x%lx", __entry->vcpu_id, __entry->rip) ); /* @@ -62,9 +64,9 @@ TRACE_EVENT(kvm_hypercall, * Tracepoint for hypercall. */ TRACE_EVENT(kvm_hv_hypercall, - TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx, - __u64 ingpa, __u64 outgpa), - TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa), + TP_PROTO(__u16 code, bool fast, __u16 var_cnt, __u16 rep_cnt, + __u16 rep_idx, __u64 ingpa, __u64 outgpa), + TP_ARGS(code, fast, var_cnt, rep_cnt, rep_idx, ingpa, outgpa), TP_STRUCT__entry( __field( __u16, rep_cnt ) @@ -72,6 +74,7 @@ TRACE_EVENT(kvm_hv_hypercall, __field( __u64, ingpa ) __field( __u64, outgpa ) __field( __u16, code ) + __field( __u16, var_cnt ) __field( bool, fast ) ), @@ -81,16 +84,68 @@ TRACE_EVENT(kvm_hv_hypercall, __entry->ingpa = ingpa; __entry->outgpa = outgpa; __entry->code = code; + __entry->var_cnt = var_cnt; __entry->fast = fast; ), - TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx", + TP_printk("code 0x%x %s var_cnt 0x%x rep_cnt 0x%x idx 0x%x in 0x%llx out 0x%llx", __entry->code, __entry->fast ? "fast" : "slow", - __entry->rep_cnt, __entry->rep_idx, __entry->ingpa, - __entry->outgpa) + __entry->var_cnt, __entry->rep_cnt, __entry->rep_idx, + __entry->ingpa, __entry->outgpa) +); + +TRACE_EVENT(kvm_hv_hypercall_done, + TP_PROTO(u64 result), + TP_ARGS(result), + + TP_STRUCT__entry( + __field(__u64, result) + ), + + TP_fast_assign( + __entry->result = result; + ), + + TP_printk("result 0x%llx", __entry->result) ); /* + * Tracepoint for Xen hypercall. + */ +TRACE_EVENT(kvm_xen_hypercall, + TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5), + TP_ARGS(nr, a0, a1, a2, a3, a4, a5), + + TP_STRUCT__entry( + __field(unsigned long, nr) + __field(unsigned long, a0) + __field(unsigned long, a1) + __field(unsigned long, a2) + __field(unsigned long, a3) + __field(unsigned long, a4) + __field(unsigned long, a5) + ), + + TP_fast_assign( + __entry->nr = nr; + __entry->a0 = a0; + __entry->a1 = a1; + __entry->a2 = a2; + __entry->a3 = a3; + __entry->a4 = a4; + __entry->a5 = a5; + ), + + TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx a5 %lx", + __entry->nr, __entry->a0, __entry->a1, __entry->a2, + __entry->a3, __entry->a4, __entry->a5) +); + + + +/* * Tracepoint for PIO. */ @@ -99,7 +154,7 @@ TRACE_EVENT(kvm_hv_hypercall, TRACE_EVENT(kvm_pio, TP_PROTO(unsigned int rw, unsigned int port, unsigned int size, - unsigned int count, void *data), + unsigned int count, const void *data), TP_ARGS(rw, port, size, count, data), TP_STRUCT__entry( @@ -151,32 +206,38 @@ TRACE_EVENT(kvm_fast_mmio, * Tracepoint for cpuid.
*/ TRACE_EVENT(kvm_cpuid, - TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx, - unsigned long rcx, unsigned long rdx, bool found), - TP_ARGS(function, rax, rbx, rcx, rdx, found), + TP_PROTO(unsigned int function, unsigned int index, unsigned long rax, + unsigned long rbx, unsigned long rcx, unsigned long rdx, + bool found, bool used_max_basic), + TP_ARGS(function, index, rax, rbx, rcx, rdx, found, used_max_basic), TP_STRUCT__entry( __field( unsigned int, function ) + __field( unsigned int, index ) __field( unsigned long, rax ) __field( unsigned long, rbx ) __field( unsigned long, rcx ) __field( unsigned long, rdx ) __field( bool, found ) + __field( bool, used_max_basic ) ), TP_fast_assign( __entry->function = function; + __entry->index = index; __entry->rax = rax; __entry->rbx = rbx; __entry->rcx = rcx; __entry->rdx = rdx; __entry->found = found; + __entry->used_max_basic = used_max_basic; ), - TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s", - __entry->function, __entry->rax, + TP_printk("func %x idx %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s%s", + __entry->function, __entry->index, __entry->rax, __entry->rbx, __entry->rcx, __entry->rdx, - __entry->found ? "found" : "not found") + __entry->found ? "found" : "not found", + __entry->used_max_basic ? ", used max basic" : "") ); #define AREG(x) { APIC_##x, "APIC_" #x } @@ -192,13 +253,13 @@ TRACE_EVENT(kvm_cpuid, * Tracepoint for apic access. */ TRACE_EVENT(kvm_apic, - TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val), + TP_PROTO(unsigned int rw, unsigned int reg, u64 val), TP_ARGS(rw, reg, val), TP_STRUCT__entry( __field( unsigned int, rw ) __field( unsigned int, reg ) - __field( unsigned int, val ) + __field( u64, val ) ), TP_fast_assign( @@ -207,7 +268,7 @@ TRACE_EVENT(kvm_apic, __entry->val = val; ), - TP_printk("apic_%s %s = 0x%x", + TP_printk("apic_%s %s = 0x%llx", __entry->rw ? "write" : "read", __print_symbolic(__entry->reg, kvm_trace_symbol_apic), __entry->val) @@ -219,55 +280,77 @@ TRACE_EVENT(kvm_apic, #define KVM_ISA_VMX 1 #define KVM_ISA_SVM 2 +#define kvm_print_exit_reason(exit_reason, isa) \ + (isa == KVM_ISA_VMX) ? \ + __print_symbolic(exit_reason & 0xffff, VMX_EXIT_REASONS) : \ + __print_symbolic(exit_reason, SVM_EXIT_REASONS), \ + (isa == KVM_ISA_VMX && exit_reason & ~0xffff) ? " " : "", \ + (isa == KVM_ISA_VMX) ? 
\ + __print_flags(exit_reason & ~0xffff, " ", VMX_EXIT_REASON_FLAGS) : "" + +#define TRACE_EVENT_KVM_EXIT(name) \ +TRACE_EVENT(name, \ + TP_PROTO(struct kvm_vcpu *vcpu, u32 isa), \ + TP_ARGS(vcpu, isa), \ + \ + TP_STRUCT__entry( \ + __field( unsigned int, exit_reason ) \ + __field( unsigned long, guest_rip ) \ + __field( u32, isa ) \ + __field( u64, info1 ) \ + __field( u64, info2 ) \ + __field( u32, intr_info ) \ + __field( u32, error_code ) \ + __field( unsigned int, vcpu_id ) \ + ), \ + \ + TP_fast_assign( \ + __entry->guest_rip = kvm_rip_read(vcpu); \ + __entry->isa = isa; \ + __entry->vcpu_id = vcpu->vcpu_id; \ + static_call(kvm_x86_get_exit_info)(vcpu, \ + &__entry->exit_reason, \ + &__entry->info1, \ + &__entry->info2, \ + &__entry->intr_info, \ + &__entry->error_code); \ + ), \ + \ + TP_printk("vcpu %u reason %s%s%s rip 0x%lx info1 0x%016llx " \ + "info2 0x%016llx intr_info 0x%08x error_code 0x%08x", \ + __entry->vcpu_id, \ + kvm_print_exit_reason(__entry->exit_reason, __entry->isa), \ + __entry->guest_rip, __entry->info1, __entry->info2, \ + __entry->intr_info, __entry->error_code) \ +) + /* * Tracepoint for kvm guest exit: */ -TRACE_EVENT(kvm_exit, - TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa), - TP_ARGS(exit_reason, vcpu, isa), - - TP_STRUCT__entry( - __field( unsigned int, exit_reason ) - __field( unsigned long, guest_rip ) - __field( u32, isa ) - __field( u64, info1 ) - __field( u64, info2 ) - __field( unsigned int, vcpu_id ) - ), - - TP_fast_assign( - __entry->exit_reason = exit_reason; - __entry->guest_rip = kvm_rip_read(vcpu); - __entry->isa = isa; - __entry->vcpu_id = vcpu->vcpu_id; - kvm_x86_ops->get_exit_info(vcpu, &__entry->info1, - &__entry->info2); - ), - - TP_printk("vcpu %u reason %s rip 0x%lx info %llx %llx", - __entry->vcpu_id, - (__entry->isa == KVM_ISA_VMX) ? - __print_symbolic(__entry->exit_reason, VMX_EXIT_REASONS) : - __print_symbolic(__entry->exit_reason, SVM_EXIT_REASONS), - __entry->guest_rip, __entry->info1, __entry->info2) -); +TRACE_EVENT_KVM_EXIT(kvm_exit); /* * Tracepoint for kvm interrupt injection: */ TRACE_EVENT(kvm_inj_virq, - TP_PROTO(unsigned int irq), - TP_ARGS(irq), + TP_PROTO(unsigned int vector, bool soft, bool reinjected), + TP_ARGS(vector, soft, reinjected), TP_STRUCT__entry( - __field( unsigned int, irq ) + __field( unsigned int, vector ) + __field( bool, soft ) + __field( bool, reinjected ) ), TP_fast_assign( - __entry->irq = irq; + __entry->vector = vector; + __entry->soft = soft; + __entry->reinjected = reinjected; ), - TP_printk("irq %u", __entry->irq) + TP_printk("%s 0x%x%s", + __entry->soft ? "Soft/INTn" : "IRQ", __entry->vector, + __entry->reinjected ? 
" [reinjected]" : "") ); #define EXS(x) { x##_VECTOR, "#" #x } @@ -281,45 +364,55 @@ TRACE_EVENT(kvm_inj_virq, * Tracepoint for kvm interrupt injection: */ TRACE_EVENT(kvm_inj_exception, - TP_PROTO(unsigned exception, bool has_error, unsigned error_code), - TP_ARGS(exception, has_error, error_code), + TP_PROTO(unsigned exception, bool has_error, unsigned error_code, + bool reinjected), + TP_ARGS(exception, has_error, error_code, reinjected), TP_STRUCT__entry( __field( u8, exception ) __field( u8, has_error ) __field( u32, error_code ) + __field( bool, reinjected ) ), TP_fast_assign( __entry->exception = exception; __entry->has_error = has_error; __entry->error_code = error_code; + __entry->reinjected = reinjected; ), - TP_printk("%s (0x%x)", + TP_printk("%s%s%s%s%s", __print_symbolic(__entry->exception, kvm_trace_sym_exc), - /* FIXME: don't print error_code if not present */ - __entry->has_error ? __entry->error_code : 0) + !__entry->has_error ? "" : " (", + !__entry->has_error ? "" : __print_symbolic(__entry->error_code, { }), + !__entry->has_error ? "" : ")", + __entry->reinjected ? " [reinjected]" : "") ); /* * Tracepoint for page fault. */ TRACE_EVENT(kvm_page_fault, - TP_PROTO(unsigned long fault_address, unsigned int error_code), - TP_ARGS(fault_address, error_code), + TP_PROTO(struct kvm_vcpu *vcpu, u64 fault_address, u64 error_code), + TP_ARGS(vcpu, fault_address, error_code), TP_STRUCT__entry( - __field( unsigned long, fault_address ) - __field( unsigned int, error_code ) + __field( unsigned int, vcpu_id ) + __field( unsigned long, guest_rip ) + __field( u64, fault_address ) + __field( u64, error_code ) ), TP_fast_assign( + __entry->vcpu_id = vcpu->vcpu_id; + __entry->guest_rip = kvm_rip_read(vcpu); __entry->fault_address = fault_address; __entry->error_code = error_code; ), - TP_printk("address %lx error_code %x", + TP_printk("vcpu %u rip 0x%lx address 0x%016llx error_code 0x%llx", + __entry->vcpu_id, __entry->guest_rip, __entry->fault_address, __entry->error_code) ); @@ -501,10 +594,12 @@ TRACE_EVENT(kvm_pv_eoi, /* * Tracepoint for nested VMRUN */ -TRACE_EVENT(kvm_nested_vmrun, +TRACE_EVENT(kvm_nested_vmenter, TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl, - __u32 event_inj, bool npt), - TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt), + __u32 event_inj, bool tdp_enabled, __u64 guest_tdp_pgd, + __u64 guest_cr3, __u32 isa), + TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, tdp_enabled, + guest_tdp_pgd, guest_cr3, isa), TP_STRUCT__entry( __field( __u64, rip ) @@ -512,7 +607,9 @@ TRACE_EVENT(kvm_nested_vmrun, __field( __u64, nested_rip ) __field( __u32, int_ctl ) __field( __u32, event_inj ) - __field( bool, npt ) + __field( bool, tdp_enabled ) + __field( __u64, guest_pgd ) + __field( __u32, isa ) ), TP_fast_assign( @@ -521,76 +618,59 @@ TRACE_EVENT(kvm_nested_vmrun, __entry->nested_rip = nested_rip; __entry->int_ctl = int_ctl; __entry->event_inj = event_inj; - __entry->npt = npt; + __entry->tdp_enabled = tdp_enabled; + __entry->guest_pgd = tdp_enabled ? guest_tdp_pgd : guest_cr3; + __entry->isa = isa; ), - TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x " - "event_inj: 0x%08x npt: %s", - __entry->rip, __entry->vmcb, __entry->nested_rip, - __entry->int_ctl, __entry->event_inj, - __entry->npt ? "on" : "off") + TP_printk("rip: 0x%016llx %s: 0x%016llx nested_rip: 0x%016llx " + "int_ctl: 0x%08x event_inj: 0x%08x nested_%s=%s %s: 0x%016llx", + __entry->rip, + __entry->isa == KVM_ISA_VMX ? 
"vmcs" : "vmcb", + __entry->vmcb, + __entry->nested_rip, + __entry->int_ctl, + __entry->event_inj, + __entry->isa == KVM_ISA_VMX ? "ept" : "npt", + __entry->tdp_enabled ? "y" : "n", + !__entry->tdp_enabled ? "guest_cr3" : + __entry->isa == KVM_ISA_VMX ? "nested_eptp" : "nested_cr3", + __entry->guest_pgd) ); TRACE_EVENT(kvm_nested_intercepts, - TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept), - TP_ARGS(cr_read, cr_write, exceptions, intercept), + TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, + __u32 intercept1, __u32 intercept2, __u32 intercept3), + TP_ARGS(cr_read, cr_write, exceptions, intercept1, + intercept2, intercept3), TP_STRUCT__entry( __field( __u16, cr_read ) __field( __u16, cr_write ) __field( __u32, exceptions ) - __field( __u64, intercept ) + __field( __u32, intercept1 ) + __field( __u32, intercept2 ) + __field( __u32, intercept3 ) ), TP_fast_assign( __entry->cr_read = cr_read; __entry->cr_write = cr_write; __entry->exceptions = exceptions; - __entry->intercept = intercept; + __entry->intercept1 = intercept1; + __entry->intercept2 = intercept2; + __entry->intercept3 = intercept3; ), - TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx", - __entry->cr_read, __entry->cr_write, __entry->exceptions, - __entry->intercept) + TP_printk("cr_read: %04x cr_write: %04x excp: %08x " + "intercepts: %08x %08x %08x", + __entry->cr_read, __entry->cr_write, __entry->exceptions, + __entry->intercept1, __entry->intercept2, __entry->intercept3) ); /* * Tracepoint for #VMEXIT while nested */ -TRACE_EVENT(kvm_nested_vmexit, - TP_PROTO(__u64 rip, __u32 exit_code, - __u64 exit_info1, __u64 exit_info2, - __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa), - TP_ARGS(rip, exit_code, exit_info1, exit_info2, - exit_int_info, exit_int_info_err, isa), - - TP_STRUCT__entry( - __field( __u64, rip ) - __field( __u32, exit_code ) - __field( __u64, exit_info1 ) - __field( __u64, exit_info2 ) - __field( __u32, exit_int_info ) - __field( __u32, exit_int_info_err ) - __field( __u32, isa ) - ), - - TP_fast_assign( - __entry->rip = rip; - __entry->exit_code = exit_code; - __entry->exit_info1 = exit_info1; - __entry->exit_info2 = exit_info2; - __entry->exit_int_info = exit_int_info; - __entry->exit_int_info_err = exit_int_info_err; - __entry->isa = isa; - ), - TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx " - "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x", - __entry->rip, - (__entry->isa == KVM_ISA_VMX) ? - __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) : - __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS), - __entry->exit_info1, __entry->exit_info2, - __entry->exit_int_info, __entry->exit_int_info_err) -); +TRACE_EVENT_KVM_EXIT(kvm_nested_vmexit); /* * Tracepoint for #VMEXIT reinjected to the guest @@ -620,13 +700,11 @@ TRACE_EVENT(kvm_nested_vmexit_inject, __entry->isa = isa; ), - TP_printk("reason: %s ext_inf1: 0x%016llx " + TP_printk("reason: %s%s%s ext_inf1: 0x%016llx " "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x", - (__entry->isa == KVM_ISA_VMX) ? 
- __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) : - __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS), - __entry->exit_info1, __entry->exit_info2, - __entry->exit_int_info, __entry->exit_int_info_err) + kvm_print_exit_reason(__entry->exit_code, __entry->isa), + __entry->exit_info1, __entry->exit_info2, + __entry->exit_int_info, __entry->exit_int_info_err) ); /* @@ -744,14 +822,14 @@ TRACE_EVENT(kvm_emulate_insn, ), TP_fast_assign( - __entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS); - __entry->len = vcpu->arch.emulate_ctxt.fetch.ptr - - vcpu->arch.emulate_ctxt.fetch.data; - __entry->rip = vcpu->arch.emulate_ctxt._eip - __entry->len; + __entry->csbase = static_call(kvm_x86_get_segment_base)(vcpu, VCPU_SREG_CS); + __entry->len = vcpu->arch.emulate_ctxt->fetch.ptr + - vcpu->arch.emulate_ctxt->fetch.data; + __entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len; memcpy(__entry->insn, - vcpu->arch.emulate_ctxt.fetch.data, + vcpu->arch.emulate_ctxt->fetch.data, 15); - __entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode); + __entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt->mode); __entry->failed = failed; ), @@ -815,8 +893,8 @@ TRACE_EVENT(kvm_write_tsc_offset, #ifdef CONFIG_X86_64 #define host_clocks \ - {VCLOCK_NONE, "none"}, \ - {VCLOCK_TSC, "tsc"} \ + {VDSO_CLOCKMODE_NONE, "none"}, \ + {VDSO_CLOCKMODE_TSC, "tsc"} \ TRACE_EVENT(kvm_update_master_clock, TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched), @@ -967,7 +1045,7 @@ TRACE_EVENT(kvm_wait_lapic_expire, __entry->delta < 0 ? "early" : "late") ); -TRACE_EVENT(kvm_enter_smm, +TRACE_EVENT(kvm_smm_transition, TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering), TP_ARGS(vcpu_id, smbase, entering), @@ -1291,23 +1369,49 @@ TRACE_EVENT(kvm_hv_stimer_cleanup, __entry->vcpu_id, __entry->timer_index) ); -TRACE_EVENT(kvm_apicv_update_request, - TP_PROTO(bool activate, unsigned long bit), - TP_ARGS(activate, bit), +TRACE_EVENT(kvm_apicv_inhibit_changed, + TP_PROTO(int reason, bool set, unsigned long inhibits), + TP_ARGS(reason, set, inhibits), TP_STRUCT__entry( - __field(bool, activate) - __field(unsigned long, bit) + __field(int, reason) + __field(bool, set) + __field(unsigned long, inhibits) ), TP_fast_assign( - __entry->activate = activate; - __entry->bit = bit; + __entry->reason = reason; + __entry->set = set; + __entry->inhibits = inhibits; ), - TP_printk("%s bit=%lu", - __entry->activate ? "activate" : "deactivate", - __entry->bit) + TP_printk("%s reason=%u, inhibits=0x%lx", + __entry->set ? "set" : "cleared", + __entry->reason, __entry->inhibits) +); + +TRACE_EVENT(kvm_apicv_accept_irq, + TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec), + TP_ARGS(apicid, dm, tm, vec), + + TP_STRUCT__entry( + __field( __u32, apicid ) + __field( __u16, dm ) + __field( __u16, tm ) + __field( __u8, vec ) + ), + + TP_fast_assign( + __entry->apicid = apicid; + __entry->dm = dm; + __entry->tm = tm; + __entry->vec = vec; + ), + + TP_printk("apicid %x vec %u (%s|%s)", + __entry->apicid, __entry->vec, + __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode), + __entry->tm ? 
"level" : "edge") ); /* @@ -1367,6 +1471,62 @@ TRACE_EVENT(kvm_avic_unaccelerated_access, __entry->vec) ); +TRACE_EVENT(kvm_avic_ga_log, + TP_PROTO(u32 vmid, u32 vcpuid), + TP_ARGS(vmid, vcpuid), + + TP_STRUCT__entry( + __field(u32, vmid) + __field(u32, vcpuid) + ), + + TP_fast_assign( + __entry->vmid = vmid; + __entry->vcpuid = vcpuid; + ), + + TP_printk("vmid=%u, vcpuid=%u", + __entry->vmid, __entry->vcpuid) +); + +TRACE_EVENT(kvm_avic_kick_vcpu_slowpath, + TP_PROTO(u32 icrh, u32 icrl, u32 index), + TP_ARGS(icrh, icrl, index), + + TP_STRUCT__entry( + __field(u32, icrh) + __field(u32, icrl) + __field(u32, index) + ), + + TP_fast_assign( + __entry->icrh = icrh; + __entry->icrl = icrl; + __entry->index = index; + ), + + TP_printk("icrh:icrl=%#08x:%08x, index=%u", + __entry->icrh, __entry->icrl, __entry->index) +); + +TRACE_EVENT(kvm_avic_doorbell, + TP_PROTO(u32 vcpuid, u32 apicid), + TP_ARGS(vcpuid, apicid), + + TP_STRUCT__entry( + __field(u32, vcpuid) + __field(u32, apicid) + ), + + TP_fast_assign( + __entry->vcpuid = vcpuid; + __entry->apicid = apicid; + ), + + TP_printk("vcpuid=%u, apicid=%u", + __entry->vcpuid, __entry->apicid) +); + TRACE_EVENT(kvm_hv_timer_state, TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use), TP_ARGS(vcpu_id, hv_timer_in_use), @@ -1502,19 +1662,167 @@ TRACE_EVENT(kvm_nested_vmenter_failed, TP_ARGS(msg, err), TP_STRUCT__entry( - __field(const char *, msg) + __string(msg, msg) __field(u32, err) ), TP_fast_assign( - __entry->msg = msg; + __assign_str(msg, msg); __entry->err = err; ), - TP_printk("%s%s", __entry->msg, !__entry->err ? "" : + TP_printk("%s%s", __get_str(msg), !__entry->err ? "" : __print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS)) ); +/* + * Tracepoint for syndbg_set_msr. + */ +TRACE_EVENT(kvm_hv_syndbg_set_msr, + TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data), + TP_ARGS(vcpu_id, vp_index, msr, data), + + TP_STRUCT__entry( + __field(int, vcpu_id) + __field(u32, vp_index) + __field(u32, msr) + __field(u64, data) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu_id; + __entry->vp_index = vp_index; + __entry->msr = msr; + __entry->data = data; + ), + + TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx", + __entry->vcpu_id, __entry->vp_index, __entry->msr, + __entry->data) +); + +/* + * Tracepoint for syndbg_get_msr. 
+ */ +TRACE_EVENT(kvm_hv_syndbg_get_msr, + TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data), + TP_ARGS(vcpu_id, vp_index, msr, data), + + TP_STRUCT__entry( + __field(int, vcpu_id) + __field(u32, vp_index) + __field(u32, msr) + __field(u64, data) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu_id; + __entry->vp_index = vp_index; + __entry->msr = msr; + __entry->data = data; + ), + + TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx", + __entry->vcpu_id, __entry->vp_index, __entry->msr, + __entry->data) +); + +/* + * Tracepoint for the start of VMGEXIT processing + */ +TRACE_EVENT(kvm_vmgexit_enter, + TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb), + TP_ARGS(vcpu_id, ghcb), + + TP_STRUCT__entry( + __field(unsigned int, vcpu_id) + __field(u64, exit_reason) + __field(u64, info1) + __field(u64, info2) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu_id; + __entry->exit_reason = ghcb->save.sw_exit_code; + __entry->info1 = ghcb->save.sw_exit_info_1; + __entry->info2 = ghcb->save.sw_exit_info_2; + ), + + TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx", + __entry->vcpu_id, __entry->exit_reason, + __entry->info1, __entry->info2) +); + +/* + * Tracepoint for the end of VMGEXIT processing + */ +TRACE_EVENT(kvm_vmgexit_exit, + TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb), + TP_ARGS(vcpu_id, ghcb), + + TP_STRUCT__entry( + __field(unsigned int, vcpu_id) + __field(u64, exit_reason) + __field(u64, info1) + __field(u64, info2) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu_id; + __entry->exit_reason = ghcb->save.sw_exit_code; + __entry->info1 = ghcb->save.sw_exit_info_1; + __entry->info2 = ghcb->save.sw_exit_info_2; + ), + + TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx", + __entry->vcpu_id, __entry->exit_reason, + __entry->info1, __entry->info2) +); + +/* + * Tracepoint for the start of VMGEXIT MSR protocol processing + */ +TRACE_EVENT(kvm_vmgexit_msr_protocol_enter, + TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa), + TP_ARGS(vcpu_id, ghcb_gpa), + + TP_STRUCT__entry( + __field(unsigned int, vcpu_id) + __field(u64, ghcb_gpa) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu_id; + __entry->ghcb_gpa = ghcb_gpa; + ), + + TP_printk("vcpu %u, ghcb_gpa %016llx", + __entry->vcpu_id, __entry->ghcb_gpa) +); + +/* + * Tracepoint for the end of VMGEXIT MSR protocol processing + */ +TRACE_EVENT(kvm_vmgexit_msr_protocol_exit, + TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa, int result), + TP_ARGS(vcpu_id, ghcb_gpa, result), + + TP_STRUCT__entry( + __field(unsigned int, vcpu_id) + __field(u64, ghcb_gpa) + __field(int, result) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu_id; + __entry->ghcb_gpa = ghcb_gpa; + __entry->result = result; + ), + + TP_printk("vcpu %u, ghcb_gpa %016llx, result %d", + __entry->vcpu_id, __entry->ghcb_gpa, __entry->result) +); + #endif /* _TRACE_KVM_H */ #undef TRACE_INCLUDE_PATH
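As a usage sketch (assumptions: tracefs mounted at /sys/kernel/tracing, root privileges, a kernel carrying these tracepoints with KVM guests running; older setups expose the same files under /sys/kernel/debug/tracing): the reworked kvm_entry event can be enabled and streamed from user space, and each emitted line should now carry the guest RIP alongside the vCPU id, matching the new TP_printk above.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

/* Write a short string to a tracefs control file. */
static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	const char *base = "/sys/kernel/tracing";	/* assumed tracefs mount point */
	char path[256], buf[4096];
	int fd, i;

	/* Enable the kvm:kvm_entry event. */
	snprintf(path, sizeof(path), "%s/events/kvm/kvm_entry/enable", base);
	if (write_str(path, "1")) {
		perror("enable kvm_entry");
		return 1;
	}

	/* Stream a few chunks of formatted events; expect "vcpu N, rip 0x..." in each line. */
	snprintf(path, sizeof(path), "%s/trace_pipe", base);
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open trace_pipe");
		return 1;
	}
	for (i = 0; i < 10; i++) {
		ssize_t n = read(fd, buf, sizeof(buf) - 1);
		if (n <= 0)
			break;
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);

	/* Turn the event back off. */
	snprintf(path, sizeof(path), "%s/events/kvm/kvm_entry/enable", base);
	write_str(path, "0");
	return 0;
}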