Diffstat (limited to 'tools')
-rw-r--r-- tools/arch/arm64/include/asm/cputype.h | 4
-rw-r--r-- tools/arch/x86/include/asm/atomic.h | 7
-rw-r--r-- tools/arch/x86/include/asm/cpufeatures.h | 2
-rw-r--r-- tools/arch/x86/lib/memcpy_64.S | 2
-rw-r--r-- tools/build/Makefile.feature | 1
-rw-r--r-- tools/build/feature/Makefile | 4
-rw-r--r-- tools/build/feature/test-libbpf-bpf_program__set_insns.c | 8
-rw-r--r-- tools/iio/iio_utils.c | 4
-rw-r--r-- tools/include/asm-generic/atomic-gcc.h | 12
-rw-r--r-- tools/include/nolibc/string.h | 17
-rw-r--r-- tools/include/uapi/linux/in.h | 22
-rw-r--r-- tools/include/uapi/linux/perf_event.h | 27
-rw-r--r-- tools/include/uapi/linux/stat.h | 4
-rw-r--r-- tools/include/uapi/sound/asound.h | 16
-rwxr-xr-x tools/kvm/kvm_stat/kvm_stat | 98
-rw-r--r-- tools/perf/Documentation/arm-coresight.txt (renamed from tools/perf/Documentation/perf-arm-coresight.txt) | 0
-rw-r--r-- tools/perf/Makefile.config | 5
-rw-r--r-- tools/perf/arch/powerpc/entry/syscalls/syscall.tbl | 16
-rw-r--r-- tools/perf/builtin-record.c | 41
-rwxr-xr-x tools/perf/check-headers.sh | 2
-rw-r--r-- tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json | 6
-rw-r--r-- tools/perf/pmu-events/arch/powerpc/power10/nest_metrics.json | 72
-rw-r--r-- tools/perf/pmu-events/arch/s390/cf_z16/pai_crypto.json (renamed from tools/perf/pmu-events/arch/s390/cf_z16/pai.json) | 0
-rwxr-xr-x tools/perf/tests/shell/test_intel_pt.sh | 6
-rw-r--r-- tools/perf/trace/beauty/statx.c | 1
-rw-r--r-- tools/perf/util/auxtrace.c | 10
-rw-r--r-- tools/perf/util/bpf-event.c | 5
-rw-r--r-- tools/perf/util/bpf-loader.c | 18
-rw-r--r-- tools/perf/util/include/linux/linkage.h | 13
-rw-r--r-- tools/power/pm-graph/README | 12
-rw-r--r-- tools/power/pm-graph/sleepgraph.8 | 3
-rwxr-xr-x tools/power/pm-graph/sleepgraph.py | 225
-rw-r--r-- tools/testing/cxl/test/cxl.c | 301
-rw-r--r-- tools/testing/selftests/Makefile | 1
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/btf.c | 13
-rw-r--r-- tools/testing/selftests/bpf/progs/user_ringbuf_success.c | 4
-rw-r--r-- tools/testing/selftests/drivers/net/bonding/Makefile | 4
-rwxr-xr-x tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh | 2
l--------- tools/testing/selftests/drivers/net/bonding/net_forwarding_lib.sh | 1
-rwxr-xr-x tools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh | 4
-rw-r--r-- tools/testing/selftests/drivers/net/team/Makefile | 4
-rwxr-xr-x tools/testing/selftests/drivers/net/team/dev_addr_lists.sh | 6
l--------- tools/testing/selftests/drivers/net/team/lag_lib.sh | 1
l--------- tools/testing/selftests/drivers/net/team/net_forwarding_lib.sh | 1
-rw-r--r-- tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc | 2
-rw-r--r-- tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-eprobe.tc | 2
-rw-r--r-- tools/testing/selftests/futex/functional/Makefile | 6
-rw-r--r-- tools/testing/selftests/intel_pstate/Makefile | 6
-rw-r--r-- tools/testing/selftests/kexec/Makefile | 6
-rw-r--r-- tools/testing/selftests/kvm/.gitignore | 8
-rw-r--r-- tools/testing/selftests/kvm/Makefile | 14
-rw-r--r-- tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c | 3
-rw-r--r-- tools/testing/selftests/kvm/aarch64/arch_timer.c | 29
-rw-r--r-- tools/testing/selftests/kvm/aarch64/debug-exceptions.c | 32
-rw-r--r-- tools/testing/selftests/kvm/aarch64/hypercalls.c | 3
-rw-r--r-- tools/testing/selftests/kvm/aarch64/psci_test.c | 1
-rw-r--r-- tools/testing/selftests/kvm/aarch64/vgic_init.c | 2
-rw-r--r-- tools/testing/selftests/kvm/aarch64/vgic_irq.c | 10
-rw-r--r-- tools/testing/selftests/kvm/access_tracking_perf_test.c | 22
-rw-r--r-- tools/testing/selftests/kvm/demand_paging_test.c | 24
-rw-r--r-- tools/testing/selftests/kvm/dirty_log_perf_test.c | 130
-rw-r--r-- tools/testing/selftests/kvm/dirty_log_test.c | 3
-rw-r--r-- tools/testing/selftests/kvm/include/kvm_util_base.h | 29
-rw-r--r-- tools/testing/selftests/kvm/include/memstress.h | 72
-rw-r--r-- tools/testing/selftests/kvm/include/perf_test_util.h | 63
-rw-r--r-- tools/testing/selftests/kvm/include/test_util.h | 25
-rw-r--r-- tools/testing/selftests/kvm/include/ucall_common.h | 10
-rw-r--r-- tools/testing/selftests/kvm/include/x86_64/evmcs.h | 48
-rw-r--r-- tools/testing/selftests/kvm/include/x86_64/hyperv.h | 103
-rw-r--r-- tools/testing/selftests/kvm/include/x86_64/processor.h | 452
-rw-r--r-- tools/testing/selftests/kvm/include/x86_64/svm.h | 26
-rw-r--r-- tools/testing/selftests/kvm/include/x86_64/svm_util.h | 14
-rw-r--r-- tools/testing/selftests/kvm/include/x86_64/vmx.h | 25
-rw-r--r-- tools/testing/selftests/kvm/kvm_page_table_test.c | 6
-rw-r--r-- tools/testing/selftests/kvm/lib/aarch64/processor.c | 18
-rw-r--r-- tools/testing/selftests/kvm/lib/aarch64/ucall.c | 102
-rw-r--r-- tools/testing/selftests/kvm/lib/elf.c | 2
-rw-r--r-- tools/testing/selftests/kvm/lib/kvm_util.c | 94
-rw-r--r-- tools/testing/selftests/kvm/lib/memstress.c (renamed from tools/testing/selftests/kvm/lib/perf_test_util.c) | 133
-rw-r--r-- tools/testing/selftests/kvm/lib/riscv/ucall.c | 42
-rw-r--r-- tools/testing/selftests/kvm/lib/s390x/ucall.c | 39
-rw-r--r-- tools/testing/selftests/kvm/lib/test_util.c | 36
-rw-r--r-- tools/testing/selftests/kvm/lib/ucall_common.c | 103
-rw-r--r-- tools/testing/selftests/kvm/lib/x86_64/hyperv.c | 46
-rw-r--r-- tools/testing/selftests/kvm/lib/x86_64/memstress.c (renamed from tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c) | 37
-rw-r--r-- tools/testing/selftests/kvm/lib/x86_64/processor.c | 236
-rw-r--r-- tools/testing/selftests/kvm/lib/x86_64/ucall.c | 39
-rw-r--r-- tools/testing/selftests/kvm/lib/x86_64/vmx.c | 56
-rw-r--r-- tools/testing/selftests/kvm/max_guest_memory_test.c | 21
-rw-r--r-- tools/testing/selftests/kvm/memslot_modification_stress_test.c | 38
-rw-r--r-- tools/testing/selftests/kvm/memslot_perf_test.c | 28
-rw-r--r-- tools/testing/selftests/kvm/rseq_test.c | 4
-rw-r--r-- tools/testing/selftests/kvm/s390x/memop.c | 2
-rw-r--r-- tools/testing/selftests/kvm/s390x/resets.c | 2
-rw-r--r-- tools/testing/selftests/kvm/s390x/sync_regs_test.c | 3
-rw-r--r-- tools/testing/selftests/kvm/set_memory_region_test.c | 5
-rw-r--r-- tools/testing/selftests/kvm/steal_time.c | 1
-rw-r--r-- tools/testing/selftests/kvm/system_counter_offset_test.c | 1
-rw-r--r-- tools/testing/selftests/kvm/x86_64/amx_test.c | 101
-rw-r--r-- tools/testing/selftests/kvm/x86_64/cpuid_test.c | 11
-rw-r--r-- tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c | 3
-rw-r--r-- tools/testing/selftests/kvm/x86_64/emulator_error_test.c | 193
-rw-r--r-- tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c | 45
-rw-r--r-- tools/testing/selftests/kvm/x86_64/flds_emulation.h | 55
-rw-r--r-- tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c | 3
-rw-r--r-- tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c (renamed from tools/testing/selftests/kvm/x86_64/evmcs_test.c) | 96
-rw-r--r-- tools/testing/selftests/kvm/x86_64/hyperv_features.c | 25
-rw-r--r-- tools/testing/selftests/kvm/x86_64/hyperv_ipi.c | 314
-rw-r--r-- tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c | 99
-rw-r--r-- tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c | 690
-rw-r--r-- tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c | 5
-rw-r--r-- tools/testing/selftests/kvm/x86_64/platform_info_test.c | 3
-rw-r--r-- tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c | 77
-rw-r--r-- tools/testing/selftests/kvm/x86_64/set_sregs_test.c | 3
-rw-r--r-- tools/testing/selftests/kvm/x86_64/smaller_maxphyaddr_emulation_test.c | 111
-rw-r--r-- tools/testing/selftests/kvm/x86_64/smm_test.c | 2
-rw-r--r-- tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c | 67
-rw-r--r-- tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c | 3
-rw-r--r-- tools/testing/selftests/kvm/x86_64/sync_regs_test.c | 3
-rw-r--r-- tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c | 73
-rw-r--r-- tools/testing/selftests/kvm/x86_64/userspace_io_test.c | 3
-rw-r--r-- tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c | 88
-rw-r--r-- tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c | 1
-rw-r--r-- tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c | 19
-rw-r--r-- tools/testing/selftests/kvm/x86_64/xapic_state_test.c | 4
-rw-r--r-- tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c | 142
-rw-r--r-- tools/testing/selftests/landlock/Makefile | 7
-rw-r--r-- tools/testing/selftests/lib.mk | 4
-rwxr-xr-x tools/testing/selftests/memory-hotplug/mem-on-off-test.sh | 1
-rw-r--r-- tools/testing/selftests/net/openvswitch/Makefile | 13
-rwxr-xr-x tools/testing/selftests/net/openvswitch/openvswitch.sh | 218
-rw-r--r-- tools/testing/selftests/net/openvswitch/ovs-dpctl.py | 351
-rw-r--r-- tools/testing/selftests/pidfd/Makefile | 2
-rw-r--r-- tools/testing/selftests/pidfd/pidfd_test.c | 4
-rw-r--r-- tools/testing/selftests/pidfd/pidfd_wait.c | 12
135 files changed, 4447 insertions, 1673 deletions
diff --git a/tools/arch/arm64/include/asm/cputype.h b/tools/arch/arm64/include/asm/cputype.h
index 8aa0d276a636..abc418650fec 100644
--- a/tools/arch/arm64/include/asm/cputype.h
+++ b/tools/arch/arm64/include/asm/cputype.h
@@ -60,6 +60,7 @@
#define ARM_CPU_IMP_FUJITSU 0x46
#define ARM_CPU_IMP_HISI 0x48
#define ARM_CPU_IMP_APPLE 0x61
+#define ARM_CPU_IMP_AMPERE 0xC0
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
@@ -123,6 +124,8 @@
#define APPLE_CPU_PART_M1_ICESTORM_MAX 0x028
#define APPLE_CPU_PART_M1_FIRESTORM_MAX 0x029
+#define AMPERE_CPU_PART_AMPERE1 0xAC3
+
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@@ -172,6 +175,7 @@
#define MIDR_APPLE_M1_FIRESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_PRO)
#define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX)
#define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
+#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
#define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX
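For context: MIDR_CPU_MODEL() packs an implementer ID and a part number into
the MIDR_EL1 register layout, which is how the new MIDR_AMPERE1 value above is
formed. A minimal sketch of that composition; the shift constants are quoted
from memory of the arm64 cputype.h layout (implementer [31:24], architecture
[19:16], partnum [15:4]) and are an assumption, not part of this diff:

#include <stdio.h>

#define MIDR_PARTNUM_SHIFT	4
#define MIDR_ARCHITECTURE_SHIFT	16
#define MIDR_IMPLEMENTOR_SHIFT	24
#define MIDR_CPU_MODEL(imp, partnum)			\
	(((imp) << MIDR_IMPLEMENTOR_SHIFT) |		\
	 (0xfU << MIDR_ARCHITECTURE_SHIFT) |		\
	 ((partnum) << MIDR_PARTNUM_SHIFT))

int main(void)
{
	/* 0xC0 (Ampere) in [31:24], 0xAC3 (AmpereOne) in [15:4] */
	printf("MIDR_AMPERE1 = 0x%08x\n", MIDR_CPU_MODEL(0xC0U, 0xAC3U));
	return 0;	/* prints MIDR_AMPERE1 = 0xc00fac30 */
}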
diff --git a/tools/arch/x86/include/asm/atomic.h b/tools/arch/x86/include/asm/atomic.h
index 1f5e26aae9fc..01cc27ec4520 100644
--- a/tools/arch/x86/include/asm/atomic.h
+++ b/tools/arch/x86/include/asm/atomic.h
@@ -8,6 +8,7 @@
#define LOCK_PREFIX "\n\tlock; "
+#include <asm/asm.h>
#include <asm/cmpxchg.h>
/*
@@ -70,4 +71,10 @@ static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return cmpxchg(&v->counter, old, new);
}
+static inline int atomic_test_and_set_bit(long nr, unsigned long *addr)
+{
+ GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, "Ir", nr, "%0", "c");
+
+}
+
#endif /* _TOOLS_LINUX_ASM_X86_ATOMIC_H */
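The GEN_BINARY_RMWcc() wrapper above emits a "lock bts" and returns the CPU's
carry flag, i.e. the previous value of the bit. A hedged, compiler-builtin
restatement of that contract (an illustration, not the x86 implementation):

/* Atomically set bit nr and report whether it was already set. */
static inline int test_and_set_bit_sketch(long nr, unsigned long *addr)
{
	unsigned long bits = 8 * sizeof(unsigned long);
	unsigned long mask = 1UL << (nr % bits);
	unsigned long old;

	old = __atomic_fetch_or(addr + nr / bits, mask, __ATOMIC_SEQ_CST);
	return !!(old & mask);
}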
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index ef4775c6db01..b71f4f2ecdd5 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -96,7 +96,7 @@
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
-/* FREE! ( 3*32+17) */
+#define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S
index d0d7b9bc6cad..5418e2f99834 100644
--- a/tools/arch/x86/lib/memcpy_64.S
+++ b/tools/arch/x86/lib/memcpy_64.S
@@ -27,7 +27,7 @@
* Output:
* rax original destination
*/
-SYM_FUNC_START(__memcpy)
+SYM_TYPED_FUNC_START(__memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
"jmp memcpy_erms", X86_FEATURE_ERMS
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 57619f240b56..38f8851bd7cb 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -103,6 +103,7 @@ FEATURE_TESTS_EXTRA := \
libbpf-bpf_prog_load \
libbpf-bpf_object__next_program \
libbpf-bpf_object__next_map \
+ libbpf-bpf_program__set_insns \
libbpf-bpf_create_map \
libpfm4 \
libdebuginfod \
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 04b07ff88234..690fe97be190 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -63,6 +63,7 @@ FILES= \
test-libbpf-bpf_map_create.bin \
test-libbpf-bpf_object__next_program.bin \
test-libbpf-bpf_object__next_map.bin \
+ test-libbpf-bpf_program__set_insns.bin \
test-libbpf-btf__raw_data.bin \
test-get_cpuid.bin \
test-sdt.bin \
@@ -316,6 +317,9 @@ $(OUTPUT)test-libbpf-bpf_object__next_program.bin:
$(OUTPUT)test-libbpf-bpf_object__next_map.bin:
$(BUILD) -lbpf
+$(OUTPUT)test-libbpf-bpf_program__set_insns.bin:
+ $(BUILD) -lbpf
+
$(OUTPUT)test-libbpf-btf__raw_data.bin:
$(BUILD) -lbpf
diff --git a/tools/build/feature/test-libbpf-bpf_program__set_insns.c b/tools/build/feature/test-libbpf-bpf_program__set_insns.c
new file mode 100644
index 000000000000..f3b7f18c8f49
--- /dev/null
+++ b/tools/build/feature/test-libbpf-bpf_program__set_insns.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bpf/libbpf.h>
+
+int main(void)
+{
+ bpf_program__set_insns(NULL /* prog */, NULL /* new_insns */, 0 /* new_insn_cnt */);
+ return 0;
+}
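The probe is deliberately trivial: Makefile.feature builds and links it with
-lbpf, so it succeeds only when the installed libbpf exports
bpf_program__set_insns(). Success is surfaced to the perf build as
HAVE_LIBBPF_BPF_PROGRAM__SET_INSNS (see the Makefile.config hunk further
down). A small hypothetical consumer, just to show the guard pattern:

#include <stdio.h>

int main(void)
{
#ifdef HAVE_LIBBPF_BPF_PROGRAM__SET_INSNS
	puts("libbpf can rewrite program instructions in place");
#else
	puts("feature absent: perf falls back to an -ENOTSUP stub");
#endif
	return 0;
}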
diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c
index aadee6d34c74..8d35893b2fa8 100644
--- a/tools/iio/iio_utils.c
+++ b/tools/iio/iio_utils.c
@@ -547,6 +547,10 @@ static int calc_digits(int num)
{
int count = 0;
+ /* It takes a digit to represent zero */
+ if (!num)
+ return 1;
+
while (num != 0) {
num /= 10;
count++;
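The guard fixes a real boundary bug: the divide-by-ten loop never executes for
zero, so calc_digits(0) used to return 0 even though printing "0" takes one
character. A quick self-contained check of the corrected behavior:

#include <assert.h>

static int calc_digits(int num)
{
	int count = 0;

	if (!num)		/* the new guard: zero needs one digit */
		return 1;
	while (num != 0) {
		num /= 10;
		count++;
	}
	return count;
}

int main(void)
{
	assert(calc_digits(0) == 1);	/* was 0 before the fix */
	assert(calc_digits(9) == 1);
	assert(calc_digits(1024) == 4);
	return 0;
}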
diff --git a/tools/include/asm-generic/atomic-gcc.h b/tools/include/asm-generic/atomic-gcc.h
index 4c1966f7c77a..6daa68bf5b9e 100644
--- a/tools/include/asm-generic/atomic-gcc.h
+++ b/tools/include/asm-generic/atomic-gcc.h
@@ -4,6 +4,7 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#include <linux/bitops.h>
/*
* Atomic operations that C can't guarantee us. Useful for
@@ -69,4 +70,15 @@ static inline int atomic_cmpxchg(atomic_t *v, int oldval, int newval)
return cmpxchg(&(v)->counter, oldval, newval);
}
+static inline int atomic_test_and_set_bit(long nr, unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ long old;
+
+ addr += BIT_WORD(nr);
+
+ old = __sync_fetch_and_or(addr, mask);
+ return !!(old & mask);
+}
+
#endif /* __TOOLS_ASM_GENERIC_ATOMIC_H */
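__sync_fetch_and_or() returns the word's prior contents, so the helper reports
whether the bit was already set, which makes it usable as a lock-free "claim
exactly once" primitive. A usage sketch (bitmap size and bit number are
arbitrary):

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

static inline int atomic_test_and_set_bit(long nr, unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long old;

	addr += BIT_WORD(nr);
	old = __sync_fetch_and_or(addr, mask);
	return !!(old & mask);
}

int main(void)
{
	unsigned long bitmap[4] = { 0 };

	/* The first caller to win the bit sees 0 and does the work. */
	if (!atomic_test_and_set_bit(100, bitmap))
		puts("claimed bit 100");
	if (atomic_test_and_set_bit(100, bitmap))
		puts("bit 100 already taken");
	return 0;
}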
diff --git a/tools/include/nolibc/string.h b/tools/include/nolibc/string.h
index bef35bee9c44..ad97c0d522b8 100644
--- a/tools/include/nolibc/string.h
+++ b/tools/include/nolibc/string.h
@@ -19,9 +19,9 @@ static __attribute__((unused))
int memcmp(const void *s1, const void *s2, size_t n)
{
size_t ofs = 0;
- char c1 = 0;
+ int c1 = 0;
- while (ofs < n && !(c1 = ((char *)s1)[ofs] - ((char *)s2)[ofs])) {
+ while (ofs < n && !(c1 = ((unsigned char *)s1)[ofs] - ((unsigned char *)s2)[ofs])) {
ofs++;
}
return c1;
@@ -125,14 +125,18 @@ char *strcpy(char *dst, const char *src)
}
/* this function is only used with arguments that are not constants or when
- * it's not known because optimizations are disabled.
+ * it's not known because optimizations are disabled. Note that gcc 12
+ * recognizes an strlen() pattern and replaces it with a jump to strlen(),
+ * thus itself, hence the asm() statement below that's meant to disable this
+ * confusing practice.
*/
static __attribute__((unused))
-size_t nolibc_strlen(const char *str)
+size_t strlen(const char *str)
{
size_t len;
- for (len = 0; str[len]; len++);
+ for (len = 0; str[len]; len++)
+ asm("");
return len;
}
@@ -140,13 +144,12 @@ size_t nolibc_strlen(const char *str)
* the two branches, then will rely on an external definition of strlen().
*/
#if defined(__OPTIMIZE__)
+#define nolibc_strlen(x) strlen(x)
#define strlen(str) ({ \
__builtin_constant_p((str)) ? \
__builtin_strlen((str)) : \
nolibc_strlen((str)); \
})
-#else
-#define strlen(str) nolibc_strlen((str))
#endif
static __attribute__((unused))
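The empty asm() in the hunk above deserves a second look: at -O2, gcc's idiom
recognition can turn a plain byte-counting loop into a call to strlen(), and
since this function now *is* strlen(), that would recurse forever. The opaque
statement keeps the loop body non-trivial so the idiom never matches. A
stand-alone illustration of the pattern:

#include <stddef.h>
#include <stdio.h>

static size_t counted_len(const char *str)
{
	size_t len;

	/* Without asm(""), gcc -O2 may rewrite this loop as a call to
	 * strlen() itself -- fatal when this function IS strlen(). */
	for (len = 0; str[len]; len++)
		asm("");
	return len;
}

int main(void)
{
	printf("%zu\n", counted_len("nolibc"));	/* prints 6 */
	return 0;
}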
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
index 14168225cecd..f243ce665f74 100644
--- a/tools/include/uapi/linux/in.h
+++ b/tools/include/uapi/linux/in.h
@@ -68,6 +68,8 @@ enum {
#define IPPROTO_PIM IPPROTO_PIM
IPPROTO_COMP = 108, /* Compression Header Protocol */
#define IPPROTO_COMP IPPROTO_COMP
+ IPPROTO_L2TP = 115, /* Layer 2 Tunnelling Protocol */
+#define IPPROTO_L2TP IPPROTO_L2TP
IPPROTO_SCTP = 132, /* Stream Control Transport Protocol */
#define IPPROTO_SCTP IPPROTO_SCTP
IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */
@@ -188,21 +190,13 @@ struct ip_mreq_source {
};
struct ip_msfilter {
+ __be32 imsf_multiaddr;
+ __be32 imsf_interface;
+ __u32 imsf_fmode;
+ __u32 imsf_numsrc;
union {
- struct {
- __be32 imsf_multiaddr_aux;
- __be32 imsf_interface_aux;
- __u32 imsf_fmode_aux;
- __u32 imsf_numsrc_aux;
- __be32 imsf_slist[1];
- };
- struct {
- __be32 imsf_multiaddr;
- __be32 imsf_interface;
- __u32 imsf_fmode;
- __u32 imsf_numsrc;
- __be32 imsf_slist_flex[];
- };
+ __be32 imsf_slist[1];
+ __DECLARE_FLEX_ARRAY(__be32, imsf_slist_flex);
};
};
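The restructuring hoists the four fixed fields out of the union and keeps only
the two views of the source list inside it: the one-element array the ABI has
always exposed, and a flexible-array view that fortified bounds checking can
reason about. A sketch of sizing an allocation through the flex member; the
expansion of __DECLARE_FLEX_ARRAY (an empty struct wrapping a true flexible
array, both gcc extensions) is quoted from memory of include/uapi/linux/stddef.h
and worth double-checking:

#include <stdlib.h>

struct msfilter_sketch {		/* stand-in for struct ip_msfilter */
	unsigned int multiaddr, iface, fmode, numsrc;
	union {
		unsigned int slist[1];	/* legacy ABI view */
		struct {		/* ~ __DECLARE_FLEX_ARRAY expansion */
			struct { } __empty_slist;
			unsigned int slist_flex[];
		};
	};
};

int main(void)
{
	unsigned int n = 4;
	struct msfilter_sketch *f;

	f = malloc(sizeof(*f) + n * sizeof(f->slist_flex[0]));
	if (!f)
		return 1;
	f->numsrc = n;
	f->slist_flex[n - 1] = 0;	/* in bounds for the flex view */
	free(f);
	return 0;
}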
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index ea6defacc1a7..ccb7f5dad59b 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -164,8 +164,6 @@ enum perf_event_sample_format {
PERF_SAMPLE_WEIGHT_STRUCT = 1U << 24,
PERF_SAMPLE_MAX = 1U << 25, /* non-ABI */
-
- __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */
};
#define PERF_SAMPLE_WEIGHT_TYPE (PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT)
@@ -263,6 +261,17 @@ enum {
PERF_BR_MAX,
};
+/*
+ * Common branch speculation outcome classification
+ */
+enum {
+ PERF_BR_SPEC_NA = 0, /* Not available */
+ PERF_BR_SPEC_WRONG_PATH = 1, /* Speculative but on wrong path */
+ PERF_BR_NON_SPEC_CORRECT_PATH = 2, /* Non-speculative but on correct path */
+ PERF_BR_SPEC_CORRECT_PATH = 3, /* Speculative and on correct path */
+ PERF_BR_SPEC_MAX,
+};
+
enum {
PERF_BR_NEW_FAULT_ALGN = 0, /* Alignment fault */
PERF_BR_NEW_FAULT_DATA = 1, /* Data fault */
@@ -282,11 +291,11 @@ enum {
PERF_BR_PRIV_HV = 3,
};
-#define PERF_BR_ARM64_FIQ PERF_BR_NEW_ARCH_1
-#define PERF_BR_ARM64_DEBUG_HALT PERF_BR_NEW_ARCH_2
-#define PERF_BR_ARM64_DEBUG_EXIT PERF_BR_NEW_ARCH_3
-#define PERF_BR_ARM64_DEBUG_INST PERF_BR_NEW_ARCH_4
-#define PERF_BR_ARM64_DEBUG_DATA PERF_BR_NEW_ARCH_5
+#define PERF_BR_ARM64_FIQ PERF_BR_NEW_ARCH_1
+#define PERF_BR_ARM64_DEBUG_HALT PERF_BR_NEW_ARCH_2
+#define PERF_BR_ARM64_DEBUG_EXIT PERF_BR_NEW_ARCH_3
+#define PERF_BR_ARM64_DEBUG_INST PERF_BR_NEW_ARCH_4
+#define PERF_BR_ARM64_DEBUG_DATA PERF_BR_NEW_ARCH_5
#define PERF_SAMPLE_BRANCH_PLM_ALL \
(PERF_SAMPLE_BRANCH_USER|\
@@ -1397,6 +1406,7 @@ union perf_mem_data_src {
* abort: aborting a hardware transaction
* cycles: cycles from last branch (or 0 if not supported)
* type: branch type
+ * spec: branch speculation info (or 0 if not supported)
*/
struct perf_branch_entry {
__u64 from;
@@ -1407,9 +1417,10 @@ struct perf_branch_entry {
abort:1, /* transaction abort */
cycles:16, /* cycle count to last branch */
type:4, /* branch type */
+ spec:2, /* branch speculation info */
new_type:4, /* additional branch type */
priv:3, /* privilege level */
- reserved:33;
+ reserved:31;
};
union perf_sample_weight {
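Consumers can now classify each sampled branch by speculation outcome; the two
spec bits are carved out of the reserved field, which shrinks from 33 to 31
bits so the entry stays the same size. A small decoding sketch using the enum
introduced above:

#include <stdio.h>

enum {
	PERF_BR_SPEC_NA			= 0,
	PERF_BR_SPEC_WRONG_PATH		= 1,
	PERF_BR_NON_SPEC_CORRECT_PATH	= 2,
	PERF_BR_SPEC_CORRECT_PATH	= 3,
};

static const char *br_spec_name(unsigned int spec)
{
	static const char * const names[] = {
		"N/A", "spec wrong path",
		"non-spec correct path", "spec correct path",
	};

	return spec < 4 ? names[spec] : "unknown";
}

int main(void)
{
	printf("%s\n", br_spec_name(PERF_BR_SPEC_CORRECT_PATH));
	return 0;
}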
diff --git a/tools/include/uapi/linux/stat.h b/tools/include/uapi/linux/stat.h
index 1500a0f58041..7cab2c65d3d7 100644
--- a/tools/include/uapi/linux/stat.h
+++ b/tools/include/uapi/linux/stat.h
@@ -124,7 +124,8 @@ struct statx {
__u32 stx_dev_minor;
/* 0x90 */
__u64 stx_mnt_id;
- __u64 __spare2;
+ __u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */
+ __u32 stx_dio_offset_align; /* File offset alignment for direct I/O */
/* 0xa0 */
__u64 __spare3[12]; /* Spare space for future expansion */
/* 0x100 */
@@ -152,6 +153,7 @@ struct statx {
#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */
#define STATX_BTIME 0x00000800U /* Want/got stx_btime */
#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */
+#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
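The two new fields reuse spare space, so the struct's size and layout are
unchanged; userspace asks for them with the new mask bit and must check
stx_mask on return, since not every filesystem reports alignment. A hedged
usage sketch, assuming libc headers recent enough to define STATX_DIOALIGN:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;
	const char *path = argc > 1 ? argv[1] : ".";

	if (statx(AT_FDCWD, path, 0, STATX_DIOALIGN, &stx) != 0) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_DIOALIGN)	/* fs may not report it */
		printf("dio: mem align %u, offset align %u\n",
		       stx.stx_dio_mem_align, stx.stx_dio_offset_align);
	else
		puts("STATX_DIOALIGN not reported for this file");
	return 0;
}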
diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h
index 3974a2a911cc..de6810e94abe 100644
--- a/tools/include/uapi/sound/asound.h
+++ b/tools/include/uapi/sound/asound.h
@@ -3,22 +3,6 @@
* Advanced Linux Sound Architecture - ALSA - Driver
* Copyright (c) 1994-2003 by Jaroslav Kysela <perex@perex.cz>,
* Abramo Bagnara <abramo@alsa-project.org>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _UAPI__SOUND_ASOUND_H
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index 9c366b3a676d..6f28180ffeea 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -41,11 +41,14 @@ VMX_EXIT_REASONS = {
'EXCEPTION_NMI': 0,
'EXTERNAL_INTERRUPT': 1,
'TRIPLE_FAULT': 2,
- 'PENDING_INTERRUPT': 7,
+ 'INIT_SIGNAL': 3,
+ 'SIPI_SIGNAL': 4,
+ 'INTERRUPT_WINDOW': 7,
'NMI_WINDOW': 8,
'TASK_SWITCH': 9,
'CPUID': 10,
'HLT': 12,
+ 'INVD': 13,
'INVLPG': 14,
'RDPMC': 15,
'RDTSC': 16,
@@ -65,26 +68,48 @@ VMX_EXIT_REASONS = {
'MSR_READ': 31,
'MSR_WRITE': 32,
'INVALID_STATE': 33,
+ 'MSR_LOAD_FAIL': 34,
'MWAIT_INSTRUCTION': 36,
+ 'MONITOR_TRAP_FLAG': 37,
'MONITOR_INSTRUCTION': 39,
'PAUSE_INSTRUCTION': 40,
'MCE_DURING_VMENTRY': 41,
'TPR_BELOW_THRESHOLD': 43,
'APIC_ACCESS': 44,
+ 'EOI_INDUCED': 45,
+ 'GDTR_IDTR': 46,
+ 'LDTR_TR': 47,
'EPT_VIOLATION': 48,
'EPT_MISCONFIG': 49,
+ 'INVEPT': 50,
+ 'RDTSCP': 51,
+ 'PREEMPTION_TIMER': 52,
+ 'INVVPID': 53,
'WBINVD': 54,
'XSETBV': 55,
'APIC_WRITE': 56,
+ 'RDRAND': 57,
'INVPCID': 58,
+ 'VMFUNC': 59,
+ 'ENCLS': 60,
+ 'RDSEED': 61,
+ 'PML_FULL': 62,
+ 'XSAVES': 63,
+ 'XRSTORS': 64,
+ 'UMWAIT': 67,
+ 'TPAUSE': 68,
+ 'BUS_LOCK': 74,
+ 'NOTIFY': 75,
}
SVM_EXIT_REASONS = {
'READ_CR0': 0x000,
+ 'READ_CR2': 0x002,
'READ_CR3': 0x003,
'READ_CR4': 0x004,
'READ_CR8': 0x008,
'WRITE_CR0': 0x010,
+ 'WRITE_CR2': 0x012,
'WRITE_CR3': 0x013,
'WRITE_CR4': 0x014,
'WRITE_CR8': 0x018,
@@ -105,6 +130,7 @@ SVM_EXIT_REASONS = {
'WRITE_DR6': 0x036,
'WRITE_DR7': 0x037,
'EXCP_BASE': 0x040,
+ 'LAST_EXCP': 0x05f,
'INTR': 0x060,
'NMI': 0x061,
'SMI': 0x062,
@@ -151,21 +177,45 @@ SVM_EXIT_REASONS = {
'MWAIT': 0x08b,
'MWAIT_COND': 0x08c,
'XSETBV': 0x08d,
+ 'RDPRU': 0x08e,
+ 'EFER_WRITE_TRAP': 0x08f,
+ 'CR0_WRITE_TRAP': 0x090,
+ 'CR1_WRITE_TRAP': 0x091,
+ 'CR2_WRITE_TRAP': 0x092,
+ 'CR3_WRITE_TRAP': 0x093,
+ 'CR4_WRITE_TRAP': 0x094,
+ 'CR5_WRITE_TRAP': 0x095,
+ 'CR6_WRITE_TRAP': 0x096,
+ 'CR7_WRITE_TRAP': 0x097,
+ 'CR8_WRITE_TRAP': 0x098,
+ 'CR9_WRITE_TRAP': 0x099,
+ 'CR10_WRITE_TRAP': 0x09a,
+ 'CR11_WRITE_TRAP': 0x09b,
+ 'CR12_WRITE_TRAP': 0x09c,
+ 'CR13_WRITE_TRAP': 0x09d,
+ 'CR14_WRITE_TRAP': 0x09e,
+ 'CR15_WRITE_TRAP': 0x09f,
+ 'INVPCID': 0x0a2,
'NPF': 0x400,
+ 'AVIC_INCOMPLETE_IPI': 0x401,
+ 'AVIC_UNACCELERATED_ACCESS': 0x402,
+ 'VMGEXIT': 0x403,
}
-# EC definition of HSR (from arch/arm64/include/asm/kvm_arm.h)
+# EC definition of HSR (from arch/arm64/include/asm/esr.h)
AARCH64_EXIT_REASONS = {
'UNKNOWN': 0x00,
- 'WFI': 0x01,
+ 'WFx': 0x01,
'CP15_32': 0x03,
'CP15_64': 0x04,
'CP14_MR': 0x05,
'CP14_LS': 0x06,
'FP_ASIMD': 0x07,
'CP10_ID': 0x08,
+ 'PAC': 0x09,
'CP14_64': 0x0C,
- 'ILL_ISS': 0x0E,
+ 'BTI': 0x0D,
+ 'ILL': 0x0E,
'SVC32': 0x11,
'HVC32': 0x12,
'SMC32': 0x13,
@@ -173,21 +223,26 @@ AARCH64_EXIT_REASONS = {
'HVC64': 0x16,
'SMC64': 0x17,
'SYS64': 0x18,
- 'IABT': 0x20,
- 'IABT_HYP': 0x21,
+ 'SVE': 0x19,
+ 'ERET': 0x1A,
+ 'FPAC': 0x1C,
+ 'SME': 0x1D,
+ 'IMP_DEF': 0x1F,
+ 'IABT_LOW': 0x20,
+ 'IABT_CUR': 0x21,
'PC_ALIGN': 0x22,
- 'DABT': 0x24,
- 'DABT_HYP': 0x25,
+ 'DABT_LOW': 0x24,
+ 'DABT_CUR': 0x25,
'SP_ALIGN': 0x26,
'FP_EXC32': 0x28,
'FP_EXC64': 0x2C,
'SERROR': 0x2F,
- 'BREAKPT': 0x30,
- 'BREAKPT_HYP': 0x31,
- 'SOFTSTP': 0x32,
- 'SOFTSTP_HYP': 0x33,
- 'WATCHPT': 0x34,
- 'WATCHPT_HYP': 0x35,
+ 'BREAKPT_LOW': 0x30,
+ 'BREAKPT_CUR': 0x31,
+ 'SOFTSTP_LOW': 0x32,
+ 'SOFTSTP_CUR': 0x33,
+ 'WATCHPT_LOW': 0x34,
+ 'WATCHPT_CUR': 0x35,
'BKPT32': 0x38,
'VECTOR32': 0x3A,
'BRK64': 0x3C,
@@ -220,6 +275,19 @@ USERSPACE_EXIT_REASONS = {
'S390_TSCH': 22,
'EPR': 23,
'SYSTEM_EVENT': 24,
+ 'S390_STSI': 25,
+ 'IOAPIC_EOI': 26,
+ 'HYPERV': 27,
+ 'ARM_NISV': 28,
+ 'X86_RDMSR': 29,
+ 'X86_WRMSR': 30,
+ 'DIRTY_RING_FULL': 31,
+ 'AP_RESET_HOLD': 32,
+ 'X86_BUS_LOCK': 33,
+ 'XEN': 34,
+ 'RISCV_SBI': 35,
+ 'RISCV_CSR': 36,
+ 'NOTIFY': 37,
}
IOCTL_NUMBERS = {
@@ -1756,7 +1824,7 @@ def assign_globals():
debugfs = ''
for line in open('/proc/mounts'):
- if line.split(' ')[0] == 'debugfs':
+ if line.split(' ')[2] == 'debugfs':
debugfs = line.split(' ')[1]
break
if debugfs == '':
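The one-character index change is the real fix here: each /proc/mounts line
reads "source mountpoint fstype options ...", and the source field for debugfs
is often the literal string "none", so matching field 0 could silently fail to
find the mount. Field 2 is the filesystem type. The corrected parse, restated
as a small C program:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char src[64], mnt[256], type[64];
	FILE *f = fopen("/proc/mounts", "r");

	if (!f)
		return 1;
	/* fields: <source> <mountpoint> <fstype> <options> <dump> <pass> */
	while (fscanf(f, "%63s %255s %63s %*[^\n]", src, mnt, type) == 3) {
		if (!strcmp(type, "debugfs")) {	/* match fstype, not source */
			printf("debugfs mounted at %s\n", mnt);
			break;
		}
	}
	fclose(f);
	return 0;
}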
diff --git a/tools/perf/Documentation/perf-arm-coresight.txt b/tools/perf/Documentation/arm-coresight.txt
index c117fc50a2a9..c117fc50a2a9 100644
--- a/tools/perf/Documentation/perf-arm-coresight.txt
+++ b/tools/perf/Documentation/arm-coresight.txt
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 6fd4b1384b97..898226ea8cad 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -588,6 +588,10 @@ ifndef NO_LIBELF
ifeq ($(feature-libbpf-bpf_object__next_map), 1)
CFLAGS += -DHAVE_LIBBPF_BPF_OBJECT__NEXT_MAP
endif
+ $(call feature_check,libbpf-bpf_program__set_insns)
+ ifeq ($(feature-libbpf-bpf_program__set_insns), 1)
+ CFLAGS += -DHAVE_LIBBPF_BPF_PROGRAM__SET_INSNS
+ endif
$(call feature_check,libbpf-btf__raw_data)
ifeq ($(feature-libbpf-btf__raw_data), 1)
CFLAGS += -DHAVE_LIBBPF_BTF__RAW_DATA
@@ -604,6 +608,7 @@ ifndef NO_LIBELF
CFLAGS += -DHAVE_LIBBPF_BPF_PROG_LOAD
CFLAGS += -DHAVE_LIBBPF_BPF_OBJECT__NEXT_PROGRAM
CFLAGS += -DHAVE_LIBBPF_BPF_OBJECT__NEXT_MAP
+ CFLAGS += -DHAVE_LIBBPF_BPF_PROGRAM__SET_INSNS
CFLAGS += -DHAVE_LIBBPF_BTF__RAW_DATA
CFLAGS += -DHAVE_LIBBPF_BPF_MAP_CREATE
endif
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index 2bca64f96164..e9e0df4f9a61 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -228,8 +228,10 @@
176 64 rt_sigtimedwait sys_rt_sigtimedwait
177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
-179 common pread64 sys_pread64 compat_sys_ppc_pread64
-180 common pwrite64 sys_pwrite64 compat_sys_ppc_pwrite64
+179 32 pread64 sys_ppc_pread64 compat_sys_ppc_pread64
+179 64 pread64 sys_pread64
+180 32 pwrite64 sys_ppc_pwrite64 compat_sys_ppc_pwrite64
+180 64 pwrite64 sys_pwrite64
181 common chown sys_chown
182 common getcwd sys_getcwd
183 common capget sys_capget
@@ -242,10 +244,11 @@
188 common putpmsg sys_ni_syscall
189 nospu vfork sys_vfork
190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
-191 common readahead sys_readahead compat_sys_ppc_readahead
+191 32 readahead sys_ppc_readahead compat_sys_ppc_readahead
+191 64 readahead sys_readahead
192 32 mmap2 sys_mmap2 compat_sys_mmap2
-193 32 truncate64 sys_truncate64 compat_sys_ppc_truncate64
-194 32 ftruncate64 sys_ftruncate64 compat_sys_ppc_ftruncate64
+193 32 truncate64 sys_ppc_truncate64 compat_sys_ppc_truncate64
+194 32 ftruncate64 sys_ppc_ftruncate64 compat_sys_ppc_ftruncate64
195 32 stat64 sys_stat64
196 32 lstat64 sys_lstat64
197 32 fstat64 sys_fstat64
@@ -288,7 +291,8 @@
230 common io_submit sys_io_submit compat_sys_io_submit
231 common io_cancel sys_io_cancel
232 nospu set_tid_address sys_set_tid_address
-233 common fadvise64 sys_fadvise64 compat_sys_ppc32_fadvise64
+233 32 fadvise64 sys_ppc32_fadvise64 compat_sys_ppc32_fadvise64
+233 64 fadvise64 sys_fadvise64
234 nospu exit_group sys_exit_group
235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
236 common epoll_create sys_epoll_create
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 52d254b1530c..e128b855ddde 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -649,7 +649,7 @@ static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
static volatile int signr = -1;
static volatile int child_finished;
#ifdef HAVE_EVENTFD_SUPPORT
-static int done_fd = -1;
+static volatile int done_fd = -1;
#endif
static void sig_handler(int sig)
@@ -661,19 +661,24 @@ static void sig_handler(int sig)
done = 1;
#ifdef HAVE_EVENTFD_SUPPORT
-{
- u64 tmp = 1;
- /*
- * It is possible for this signal handler to run after done is checked
- * in the main loop, but before the perf counter fds are polled. If this
- * happens, the poll() will continue to wait even though done is set,
- * and will only break out if either another signal is received, or the
- * counters are ready for read. To ensure the poll() doesn't sleep when
- * done is set, use an eventfd (done_fd) to wake up the poll().
- */
- if (write(done_fd, &tmp, sizeof(tmp)) < 0)
- pr_err("failed to signal wakeup fd, error: %m\n");
-}
+ if (done_fd >= 0) {
+ u64 tmp = 1;
+ int orig_errno = errno;
+
+ /*
+ * It is possible for this signal handler to run after done is
+ * checked in the main loop, but before the perf counter fds are
+ * polled. If this happens, the poll() will continue to wait
+ * even though done is set, and will only break out if either
+ * another signal is received, or the counters are ready for
+ * read. To ensure the poll() doesn't sleep when done is set,
+ * use an eventfd (done_fd) to wake up the poll().
+ */
+ if (write(done_fd, &tmp, sizeof(tmp)) < 0)
+ pr_err("failed to signal wakeup fd, error: %m\n");
+
+ errno = orig_errno;
+ }
#endif // HAVE_EVENTFD_SUPPORT
}
@@ -2834,8 +2839,12 @@ out_free_threads:
out_delete_session:
#ifdef HAVE_EVENTFD_SUPPORT
- if (done_fd >= 0)
- close(done_fd);
+ if (done_fd >= 0) {
+ fd = done_fd;
+ done_fd = -1;
+
+ close(fd);
+ }
#endif
zstd_fini(&session->zstd_data);
perf_session__delete(session);
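Three hardenings land in this hunk: the handler writes only when the eventfd
actually exists, it preserves errno (a signal can interrupt code that is about
to inspect it, and write() may clobber it), and the teardown path resets
done_fd to -1 before closing so a late signal cannot write to a dead
descriptor. The overall shape of the pattern, reduced to a hedged stand-alone
sketch:

#include <errno.h>
#include <poll.h>
#include <signal.h>
#include <sys/eventfd.h>
#include <unistd.h>

static volatile sig_atomic_t done;
static volatile int done_fd = -1;

static void sig_handler(int sig)
{
	(void)sig;
	done = 1;
	if (done_fd >= 0) {			/* fd may not exist yet */
		unsigned long long one = 1;	/* eventfd writes are 8 bytes */
		int saved = errno;

		if (write(done_fd, &one, sizeof(one)) < 0) {
			/* nothing safe to report from a signal handler */
		}
		errno = saved;		/* keep the handler errno-clean */
	}
}

int main(void)
{
	struct pollfd pfd = { .events = POLLIN };

	done_fd = eventfd(0, EFD_NONBLOCK);
	pfd.fd = done_fd;
	signal(SIGINT, sig_handler);
	/* Even if SIGINT lands between the done check and poll(), the
	 * eventfd write guarantees poll() wakes instead of sleeping on. */
	while (!done)
		poll(&pfd, 1, -1);
	close(done_fd);
	return 0;
}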
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 6ee44b18c6b5..eacca9a874e2 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -143,7 +143,7 @@ for i in $SYNC_CHECK_FILES; do
done
# diff with extra ignore lines
-check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memcpy_\(erms\|orig\))"'
+check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memcpy_\(erms\|orig\))" -I"^#include <linux/cfi_types.h>"'
check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memset_\(erms\|orig\))"'
check arch/x86/include/asm/amd-ibs.h '-I "^#include [<\"]\(asm/\)*msr-index.h"'
check arch/arm64/include/asm/cputype.h '-I "^#include [<\"]\(asm/\)*sysreg.h"'
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json
index 6970203cb247..6443a061e22a 100644
--- a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json
+++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json
@@ -112,21 +112,21 @@
"MetricName": "indirect_branch"
},
{
- "MetricExpr": "(armv8_pmuv3_0@event\\=0x1014@ + armv8_pmuv3_0@event\\=0x1018@) / BR_MIS_PRED",
+ "MetricExpr": "(armv8_pmuv3_0@event\\=0x1013@ + armv8_pmuv3_0@event\\=0x1016@) / BR_MIS_PRED",
"PublicDescription": "Push branch L3 topdown metric",
"BriefDescription": "Push branch L3 topdown metric",
"MetricGroup": "TopDownL3",
"MetricName": "push_branch"
},
{
- "MetricExpr": "armv8_pmuv3_0@event\\=0x100c@ / BR_MIS_PRED",
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x100d@ / BR_MIS_PRED",
"PublicDescription": "Pop branch L3 topdown metric",
"BriefDescription": "Pop branch L3 topdown metric",
"MetricGroup": "TopDownL3",
"MetricName": "pop_branch"
},
{
- "MetricExpr": "(BR_MIS_PRED - armv8_pmuv3_0@event\\=0x1010@ - armv8_pmuv3_0@event\\=0x1014@ - armv8_pmuv3_0@event\\=0x1018@ - armv8_pmuv3_0@event\\=0x100c@) / BR_MIS_PRED",
+ "MetricExpr": "(BR_MIS_PRED - armv8_pmuv3_0@event\\=0x1010@ - armv8_pmuv3_0@event\\=0x1013@ - armv8_pmuv3_0@event\\=0x1016@ - armv8_pmuv3_0@event\\=0x100d@) / BR_MIS_PRED",
"PublicDescription": "Other branch L3 topdown metric",
"BriefDescription": "Other branch L3 topdown metric",
"MetricGroup": "TopDownL3",
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/nest_metrics.json b/tools/perf/pmu-events/arch/powerpc/power10/nest_metrics.json
index 8ba3e81c9808..fe050d44374b 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/nest_metrics.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/nest_metrics.json
@@ -1,13 +1,13 @@
[
{
"MetricName": "VEC_GROUP_PUMP_RETRY_RATIO_P01",
- "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP01\\,chip\\=?@ / hv_24x7@PM_PB_VG_PUMP01\\,chip\\=?@) * 100",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP01\\,chip\\=?@ / (1 + hv_24x7@PM_PB_VG_PUMP01\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
{
"MetricName": "VEC_GROUP_PUMP_RETRY_RATIO_P23",
- "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP23\\,chip\\=?@ / hv_24x7@PM_PB_VG_PUMP23\\,chip\\=?@) * 100",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP23\\,chip\\=?@ / (1 + hv_24x7@PM_PB_VG_PUMP23\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
@@ -61,13 +61,13 @@
},
{
"MetricName": "REMOTE_NODE_PUMPS_RETRIES_RATIO_P01",
- "MetricExpr": "(hv_24x7@PM_PB_RTY_RNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PB_RNS_PUMP01\\,chip\\=?@) * 100",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_RNS_PUMP01\\,chip\\=?@ / (1 + hv_24x7@PM_PB_RNS_PUMP01\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
{
"MetricName": "REMOTE_NODE_PUMPS_RETRIES_RATIO_P23",
- "MetricExpr": "(hv_24x7@PM_PB_RTY_RNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PB_RNS_PUMP23\\,chip\\=?@) * 100",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_RNS_PUMP23\\,chip\\=?@ / (1 + hv_24x7@PM_PB_RNS_PUMP23\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
@@ -151,193 +151,193 @@
},
{
"MetricName": "XLINK0_OUT_TOTAL_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_XLINK0_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_XLINK0_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
{
"MetricName": "XLINK1_OUT_TOTAL_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_XLINK1_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_XLINK1_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
{
"MetricName": "XLINK2_OUT_TOTAL_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_XLINK2_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_XLINK2_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
{
"MetricName": "XLINK3_OUT_TOTAL_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_XLINK3_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_XLINK3_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
{
"MetricName": "XLINK4_OUT_TOTAL_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_XLINK4_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_XLINK4_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
{
"MetricName": "XLINK5_OUT_TOTAL_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_XLINK5_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_XLINK5_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
{
"MetricName": "XLINK6_OUT_TOTAL_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_XLINK6_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_XLINK6_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
{
"MetricName": "XLINK7_OUT_TOTAL_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_XLINK7_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_XLINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_XLINK7_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
{
"MetricName": "XLINK0_OUT_DATA_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_XLINK0_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_XLINK0_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1.063%",
"AggregationMode": "PerChip"
},
{
"MetricName": "XLINK1_OUT_DATA_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_XLINK1_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_XLINK1_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1.063%",
"AggregationMode": "PerChip"
},
{
"MetricName": "XLINK2_OUT_DATA_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_XLINK2_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_XLINK2_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1.063%",
"AggregationMode": "PerChip"
},
{
"MetricName": "XLINK3_OUT_DATA_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_XLINK3_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_XLINK3_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1.063%",
"AggregationMode": "PerChip"
},
{
"MetricName": "XLINK4_OUT_DATA_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_XLINK4_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_XLINK4_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1.063%",
"AggregationMode": "PerChip"
},
{
"MetricName": "XLINK5_OUT_DATA_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_XLINK5_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_XLINK5_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1.063%",
"AggregationMode": "PerChip"
},
{
"MetricName": "XLINK6_OUT_DATA_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_XLINK6_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_XLINK6_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1.063%",
"AggregationMode": "PerChip"
},
{
"MetricName": "XLINK7_OUT_DATA_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_XLINK7_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_XLINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_XLINK7_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1.063%",
"AggregationMode": "PerChip"
},
{
"MetricName": "ALINK0_OUT_TOTAL_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_ALINK0_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_ALINK0_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
{
"MetricName": "ALINK1_OUT_TOTAL_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_ALINK1_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_ALINK1_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
{
"MetricName": "ALINK2_OUT_TOTAL_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_ALINK2_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_ALINK2_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
{
"MetricName": "ALINK3_OUT_TOTAL_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_ALINK3_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_ALINK3_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
{
"MetricName": "ALINK4_OUT_TOTAL_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_ALINK4_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_ALINK4_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
{
"MetricName": "ALINK5_OUT_TOTAL_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_ALINK5_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_ALINK5_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
{
"MetricName": "ALINK6_OUT_TOTAL_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_ALINK6_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_ALINK6_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
{
"MetricName": "ALINK7_OUT_TOTAL_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_ALINK7_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (hv_24x7@PM_ALINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_ALINK7_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1%",
"AggregationMode": "PerChip"
},
{
"MetricName": "ALINK0_OUT_DATA_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_ALINK0_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_ALINK0_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1.063%",
"AggregationMode": "PerChip"
},
{
"MetricName": "ALINK1_OUT_DATA_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_ALINK1_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_ALINK1_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1.063%",
"AggregationMode": "PerChip"
},
{
"MetricName": "ALINK2_OUT_DATA_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_ALINK2_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_ALINK2_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1.063%",
"AggregationMode": "PerChip"
},
{
"MetricName": "ALINK3_OUT_DATA_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_ALINK3_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_ALINK3_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1.063%",
"AggregationMode": "PerChip"
},
{
"MetricName": "ALINK4_OUT_DATA_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_ALINK4_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_ALINK4_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1.063%",
"AggregationMode": "PerChip"
},
{
"MetricName": "ALINK5_OUT_DATA_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_ALINK5_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_ALINK5_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1.063%",
"AggregationMode": "PerChip"
},
{
"MetricName": "ALINK6_OUT_DATA_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_ALINK6_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_ALINK6_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1.063%",
"AggregationMode": "PerChip"
},
{
"MetricName": "ALINK7_OUT_DATA_UTILIZATION",
- "MetricExpr": "((hv_24x7@PM_ALINK7_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_DATA\\,chip\\=?@) / (hv_24x7@PM_ALINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "MetricExpr": "((hv_24x7@PM_ALINK7_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
"ScaleUnit": "1.063%",
"AggregationMode": "PerChip"
},
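Why "1 +" rather than a conditional guard: these hv_24x7 counters legitimately
read zero on an idle link or pump, and a zero denominator previously made the
whole metric computation error out. A zero pump/cycle count presumably implies
a zero retry/utilization numerator as well, so the guarded form degrades
gracefully -- 0 / (1 + 0) * 100 = 0% -- while on busy links the extra 1 shifts
a large denominator by a negligible amount.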
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/pai.json b/tools/perf/pmu-events/arch/s390/cf_z16/pai_crypto.json
index cf8563d059b9..cf8563d059b9 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z16/pai.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/pai_crypto.json
diff --git a/tools/perf/tests/shell/test_intel_pt.sh b/tools/perf/tests/shell/test_intel_pt.sh
index 4c0aabbe33bd..f5ed7b1af419 100755
--- a/tools/perf/tests/shell/test_intel_pt.sh
+++ b/tools/perf/tests/shell/test_intel_pt.sh
@@ -526,6 +526,12 @@ test_kernel_trace()
test_virtual_lbr()
{
echo "--- Test virtual LBR ---"
+ # Check if python script is supported
+ libpython=$(perf version --build-options | grep python | grep -cv OFF)
+ if [ "${libpython}" != "1" ] ; then
+ echo "SKIP: python scripting is not supported"
+ return 2
+ fi
# Python script to determine the maximum size of branch stacks
cat << "_end_of_file_" > "${maxbrstack}"
diff --git a/tools/perf/trace/beauty/statx.c b/tools/perf/trace/beauty/statx.c
index 110f0c609d84..5f5320f7c6e2 100644
--- a/tools/perf/trace/beauty/statx.c
+++ b/tools/perf/trace/beauty/statx.c
@@ -66,6 +66,7 @@ size_t syscall_arg__scnprintf_statx_mask(char *bf, size_t size, struct syscall_a
P_FLAG(BLOCKS);
P_FLAG(BTIME);
P_FLAG(MNT_ID);
+ P_FLAG(DIOALIGN);
#undef P_FLAG
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 60d8beb662aa..46ada5ec3f9a 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -2325,11 +2325,19 @@ struct sym_args {
bool near;
};
+static bool kern_sym_name_match(const char *kname, const char *name)
+{
+ size_t n = strlen(name);
+
+ return !strcmp(kname, name) ||
+ (!strncmp(kname, name, n) && kname[n] == '\t');
+}
+
static bool kern_sym_match(struct sym_args *args, const char *name, char type)
{
/* A function with the same name, and global or the n'th found or any */
return kallsyms__is_function(type) &&
- !strcmp(name, args->name) &&
+ kern_sym_name_match(name, args->name) &&
((args->global && isupper(type)) ||
(args->selected && ++(args->cnt) == args->idx) ||
(!args->global && !args->selected));
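kallsyms entries for module symbols carry a tab-separated module suffix (e.g.
"func\t[module]"), which the old bare strcmp() could never match. The new
helper accepts either the exact name or the name terminated by that tab. A
quick self-contained restatement and check:

#include <assert.h>
#include <string.h>

static int kern_sym_name_match(const char *kname, const char *name)
{
	size_t n = strlen(name);

	return !strcmp(kname, name) ||
	       (!strncmp(kname, name, n) && kname[n] == '\t');
}

int main(void)
{
	assert(kern_sym_name_match("e1000_xmit_frame", "e1000_xmit_frame"));
	assert(kern_sym_name_match("e1000_xmit_frame\t[e1000e]",
				   "e1000_xmit_frame"));
	assert(!kern_sym_name_match("e1000_xmit_frame_x", "e1000_xmit_frame"));
	return 0;
}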
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index eee64ddb766d..cc7c1f90cf62 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -36,6 +36,11 @@ struct btf *btf__load_from_kernel_by_id(__u32 id)
#endif
#ifndef HAVE_LIBBPF_BPF_PROG_LOAD
+LIBBPF_API int bpf_load_program(enum bpf_prog_type type,
+ const struct bpf_insn *insns, size_t insns_cnt,
+ const char *license, __u32 kern_version,
+ char *log_buf, size_t log_buf_sz);
+
int bpf_prog_load(enum bpf_prog_type prog_type,
const char *prog_name __maybe_unused,
const char *license,
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index d657594894cf..f4adeccdbbcb 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -36,6 +36,24 @@
#include <internal/xyarray.h>
+#ifndef HAVE_LIBBPF_BPF_PROGRAM__SET_INSNS
+int bpf_program__set_insns(struct bpf_program *prog __maybe_unused,
+ struct bpf_insn *new_insns __maybe_unused, size_t new_insn_cnt __maybe_unused)
+{
+ pr_err("%s: not support, update libbpf\n", __func__);
+ return -ENOTSUP;
+}
+
+int libbpf_register_prog_handler(const char *sec __maybe_unused,
+ enum bpf_prog_type prog_type __maybe_unused,
+ enum bpf_attach_type exp_attach_type __maybe_unused,
+ const struct libbpf_prog_handler_opts *opts __maybe_unused)
+{
+ pr_err("%s: not support, update libbpf\n", __func__);
+ return -ENOTSUP;
+}
+#endif
+
/* temporarily disable libbpf deprecation warnings */
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
diff --git a/tools/perf/util/include/linux/linkage.h b/tools/perf/util/include/linux/linkage.h
index aa0c5179836d..75e2248416f5 100644
--- a/tools/perf/util/include/linux/linkage.h
+++ b/tools/perf/util/include/linux/linkage.h
@@ -115,4 +115,17 @@
SYM_ALIAS(alias, name, SYM_T_FUNC, SYM_L_WEAK)
#endif
+// In the kernel sources (include/linux/cfi_types.h), this has a different
+// definition when CONFIG_CFI_CLANG is used, for tools/ just use the !clang
+// definition:
+#ifndef SYM_TYPED_START
+#define SYM_TYPED_START(name, linkage, align...) \
+ SYM_START(name, linkage, align)
+#endif
+
+#ifndef SYM_TYPED_FUNC_START
+#define SYM_TYPED_FUNC_START(name) \
+ SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
#endif /* PERF_LINUX_LINKAGE_H_ */
diff --git a/tools/power/pm-graph/README b/tools/power/pm-graph/README
index e6020c0d59ec..3213dbe63b74 100644
--- a/tools/power/pm-graph/README
+++ b/tools/power/pm-graph/README
@@ -6,22 +6,22 @@
|_| |___/ |_|
pm-graph: suspend/resume/boot timing analysis tools
- Version: 5.9
+ Version: 5.10
Author: Todd Brandt <todd.e.brandt@intel.com>
- Home Page: https://01.org/pm-graph
+ Home Page: https://www.intel.com/content/www/us/en/developer/topic-technology/open/pm-graph/overview.html
Report bugs/issues at bugzilla.kernel.org Tools/pm-graph
- https://bugzilla.kernel.org/buglist.cgi?component=pm-graph&product=Tools
Full documentation available online & in man pages
- Getting Started:
- https://01.org/pm-graph/documentation/getting-started
+ https://www.intel.com/content/www/us/en/developer/articles/technical/usage.html
- - Config File Format:
- https://01.org/pm-graph/documentation/3-config-file-format
+ - Feature Summary:
+ https://www.intel.com/content/www/us/en/developer/topic-technology/open/pm-graph/features.html
- upstream version in git:
- https://github.com/intel/pm-graph/
+ git clone https://github.com/intel/pm-graph/
Table of Contents
- Overview
diff --git a/tools/power/pm-graph/sleepgraph.8 b/tools/power/pm-graph/sleepgraph.8
index 5126271de98a..643271b6fc6f 100644
--- a/tools/power/pm-graph/sleepgraph.8
+++ b/tools/power/pm-graph/sleepgraph.8
@@ -78,6 +78,9 @@ This helps maintain the consistency of test data for better comparison.
If a wifi connection is available, check that it reconnects after resume. Include
the reconnect time in the total resume time calculation and treat wifi timeouts
as resume failures.
+.TP
+\fB-wifitrace\fR
+Trace kernel execution through the wifi reconnect and include it in the timeline.
.SS "advanced"
.TP
diff --git a/tools/power/pm-graph/sleepgraph.py b/tools/power/pm-graph/sleepgraph.py
index 33981adcdd68..cfe343306e08 100755
--- a/tools/power/pm-graph/sleepgraph.py
+++ b/tools/power/pm-graph/sleepgraph.py
@@ -86,7 +86,7 @@ def ascii(text):
# store system values and test parameters
class SystemValues:
title = 'SleepGraph'
- version = '5.9'
+ version = '5.10'
ansi = False
rs = 0
display = ''
@@ -100,6 +100,7 @@ class SystemValues:
ftracelog = False
acpidebug = True
tstat = True
+ wifitrace = False
mindevlen = 0.0001
mincglen = 0.0
cgphase = ''
@@ -124,6 +125,7 @@ class SystemValues:
epath = '/sys/kernel/debug/tracing/events/power/'
pmdpath = '/sys/power/pm_debug_messages'
s0ixpath = '/sys/module/intel_pmc_core/parameters/warn_on_s0ix_failures'
+ s0ixres = '/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us'
acpipath='/sys/module/acpi/parameters/debug_level'
traceevents = [
'suspend_resume',
@@ -180,6 +182,7 @@ class SystemValues:
tmstart = 'SUSPEND START %Y%m%d-%H:%M:%S.%f'
tmend = 'RESUME COMPLETE %Y%m%d-%H:%M:%S.%f'
tracefuncs = {
+ 'async_synchronize_full': {},
'sys_sync': {},
'ksys_sync': {},
'__pm_notifier_call_chain': {},
@@ -304,6 +307,7 @@ class SystemValues:
[2, 'suspendstats', 'sh', '-c', 'grep -v invalid /sys/power/suspend_stats/*'],
[2, 'cpuidle', 'sh', '-c', 'grep -v invalid /sys/devices/system/cpu/cpu*/cpuidle/state*/s2idle/*'],
[2, 'battery', 'sh', '-c', 'grep -v invalid /sys/class/power_supply/*/*'],
+ [2, 'thermal', 'sh', '-c', 'grep . /sys/class/thermal/thermal_zone*/temp'],
]
cgblacklist = []
kprobes = dict()
@@ -777,7 +781,7 @@ class SystemValues:
return
if not quiet:
sysvals.printSystemInfo(False)
- pprint('INITIALIZING FTRACE...')
+ pprint('INITIALIZING FTRACE')
# turn trace off
self.fsetVal('0', 'tracing_on')
self.cleanupFtrace()
@@ -841,7 +845,7 @@ class SystemValues:
for name in self.dev_tracefuncs:
self.defaultKprobe(name, self.dev_tracefuncs[name])
if not quiet:
- pprint('INITIALIZING KPROBES...')
+ pprint('INITIALIZING KPROBES')
self.addKprobes(self.verbose)
if(self.usetraceevents):
# turn trace events on
@@ -1133,6 +1137,15 @@ class SystemValues:
self.cfgdef[file] = fp.read().strip()
fp.write(value)
fp.close()
+ def s0ixSupport(self):
+ if not os.path.exists(self.s0ixres) or not os.path.exists(self.mempowerfile):
+ return False
+ fp = open(sysvals.mempowerfile, 'r')
+ data = fp.read().strip()
+ fp.close()
+ if '[s2idle]' in data:
+ return True
+ return False
def haveTurbostat(self):
if not self.tstat:
return False
@@ -1146,7 +1159,7 @@ class SystemValues:
self.vprint(out)
return True
return False
- def turbostat(self):
+ def turbostat(self, s0ixready):
cmd = self.getExec('turbostat')
rawout = keyline = valline = ''
fullcmd = '%s -q -S echo freeze > %s' % (cmd, self.powerfile)
@@ -1173,6 +1186,8 @@ class SystemValues:
for key in keyline:
idx = keyline.index(key)
val = valline[idx]
+ if key == 'SYS%LPI' and not s0ixready and re.match('^[0\.]*$', val):
+ continue
out.append('%s=%s' % (key, val))
return '|'.join(out)
def netfixon(self, net='both'):
@@ -1183,14 +1198,6 @@ class SystemValues:
out = ascii(fp.read()).strip()
fp.close()
return out
- def wifiRepair(self):
- out = self.netfixon('wifi')
- if not out or 'error' in out.lower():
- return ''
- m = re.match('WIFI \S* ONLINE (?P<action>\S*)', out)
- if not m:
- return 'dead'
- return m.group('action')
def wifiDetails(self, dev):
try:
info = open('/sys/class/net/%s/device/uevent' % dev, 'r').read().strip()
@@ -1220,11 +1227,6 @@ class SystemValues:
return '%s reconnected %.2f' % \
(self.wifiDetails(dev), max(0, time.time() - start))
time.sleep(0.01)
- if self.netfix:
- res = self.wifiRepair()
- if res:
- timeout = max(0, time.time() - start)
- return '%s %s %d' % (self.wifiDetails(dev), res, timeout)
return '%s timeout %d' % (self.wifiDetails(dev), timeout)
def errorSummary(self, errinfo, msg):
found = False
@@ -1346,6 +1348,20 @@ class SystemValues:
for i in self.rslist:
self.setVal(self.rstgt, i)
pprint('runtime suspend settings restored on %d devices' % len(self.rslist))
+ def start(self, pm):
+ if self.useftrace:
+ self.dlog('start ftrace tracing')
+ self.fsetVal('1', 'tracing_on')
+ if self.useprocmon:
+ self.dlog('start the process monitor')
+ pm.start()
+ def stop(self, pm):
+ if self.useftrace:
+ if self.useprocmon:
+ self.dlog('stop the process monitor')
+ pm.stop()
+ self.dlog('stop ftrace tracing')
+ self.fsetVal('0', 'tracing_on')
sysvals = SystemValues()
switchvalues = ['enable', 'disable', 'on', 'off', 'true', 'false', '1', '0']
@@ -1643,19 +1659,20 @@ class Data:
ubiquitous = False
if kprobename in dtf and 'ub' in dtf[kprobename]:
ubiquitous = True
- title = cdata+' '+rdata
- mstr = '\(.*\) *(?P<args>.*) *\((?P<caller>.*)\+.* arg1=(?P<ret>.*)'
- m = re.match(mstr, title)
- if m:
- c = m.group('caller')
- a = m.group('args').strip()
- r = m.group('ret')
+ mc = re.match('\(.*\) *(?P<args>.*)', cdata)
+ mr = re.match('\((?P<caller>\S*).* arg1=(?P<ret>.*)', rdata)
+ if mc and mr:
+ c = mr.group('caller').split('+')[0]
+ a = mc.group('args').strip()
+ r = mr.group('ret')
if len(r) > 6:
r = ''
else:
r = 'ret=%s ' % r
if ubiquitous and c in dtf and 'ub' in dtf[c]:
return False
+ else:
+ return False
color = sysvals.kprobeColor(kprobename)
e = DevFunction(displayname, a, c, r, start, end, ubiquitous, proc, pid, color)
tgtdev['src'].append(e)
@@ -1772,6 +1789,14 @@ class Data:
e.time = self.trimTimeVal(e.time, t0, dT, left)
e.end = self.trimTimeVal(e.end, t0, dT, left)
e.length = e.end - e.time
+ if('cpuexec' in d):
+ cpuexec = dict()
+ for e in d['cpuexec']:
+ c0, cN = e
+ c0 = self.trimTimeVal(c0, t0, dT, left)
+ cN = self.trimTimeVal(cN, t0, dT, left)
+ cpuexec[(c0, cN)] = d['cpuexec'][e]
+ d['cpuexec'] = cpuexec
for dir in ['suspend', 'resume']:
list = []
for e in self.errorinfo[dir]:
@@ -2086,75 +2111,43 @@ class Data:
return d
def addProcessUsageEvent(self, name, times):
# get the start and end times for this process
- maxC = 0
- tlast = 0
- start = -1
- end = -1
+ cpuexec = dict()
+ tlast = start = end = -1
for t in sorted(times):
- if tlast == 0:
+ if tlast < 0:
tlast = t
continue
- if name in self.pstl[t]:
- if start == -1 or tlast < start:
+ if name in self.pstl[t] and self.pstl[t][name] > 0:
+ if start < 0:
start = tlast
- if end == -1 or t > end:
- end = t
+ end, key = t, (tlast, t)
+ maxj = (t - tlast) * 1024.0
+ cpuexec[key] = min(1.0, float(self.pstl[t][name]) / maxj)
tlast = t
- if start == -1 or end == -1:
- return 0
+ if start < 0 or end < 0:
+ return
# add a new action for this process and get the object
out = self.newActionGlobal(name, start, end, -3)
- if not out:
- return 0
- phase, devname = out
- dev = self.dmesg[phase]['list'][devname]
- # get the cpu exec data
- tlast = 0
- clast = 0
- cpuexec = dict()
- for t in sorted(times):
- if tlast == 0 or t <= start or t > end:
- tlast = t
- continue
- list = self.pstl[t]
- c = 0
- if name in list:
- c = list[name]
- if c > maxC:
- maxC = c
- if c != clast:
- key = (tlast, t)
- cpuexec[key] = c
- tlast = t
- clast = c
- dev['cpuexec'] = cpuexec
- return maxC
+ if out:
+ phase, devname = out
+ dev = self.dmesg[phase]['list'][devname]
+ dev['cpuexec'] = cpuexec
def createProcessUsageEvents(self):
- # get an array of process names
- proclist = []
- for t in sorted(self.pstl):
- pslist = self.pstl[t]
- for ps in sorted(pslist):
- if ps not in proclist:
- proclist.append(ps)
- # get a list of data points for suspend and resume
- tsus = []
- tres = []
+ # get an array of process names and times
+ proclist = {'sus': dict(), 'res': dict()}
+ tdata = {'sus': [], 'res': []}
for t in sorted(self.pstl):
- if t < self.tSuspended:
- tsus.append(t)
- else:
- tres.append(t)
+ dir = 'sus' if t < self.tSuspended else 'res'
+ for ps in sorted(self.pstl[t]):
+ if ps not in proclist[dir]:
+ proclist[dir][ps] = 0
+ tdata[dir].append(t)
# process the events for suspend and resume
- if len(proclist) > 0:
+ if len(proclist['sus']) > 0 or len(proclist['res']) > 0:
sysvals.vprint('Process Execution:')
- for ps in proclist:
- c = self.addProcessUsageEvent(ps, tsus)
- if c > 0:
- sysvals.vprint('%25s (sus): %d' % (ps, c))
- c = self.addProcessUsageEvent(ps, tres)
- if c > 0:
- sysvals.vprint('%25s (res): %d' % (ps, c))
+ for dir in ['sus', 'res']:
+ for ps in sorted(proclist[dir]):
+ self.addProcessUsageEvent(ps, tdata[dir])
def handleEndMarker(self, time, msg=''):
dm = self.dmesg
self.setEnd(time, msg)
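
The reworked addProcessUsageEvent() now computes, per sample interval, the
fraction of available CPU time a process consumed, clamped to 1.0; the divisor
assumes the per-process counters advance at roughly 1024 ticks per second. The
same arithmetic as a standalone C helper (a sketch with illustrative names):

#include <stdio.h>

/* load fraction for the window [t0, t1] given ticks consumed */
static double cpuexec_fraction(double t0, double t1, double ticks)
{
	double maxj = (t1 - t0) * 1024.0;
	double j = ticks / maxj;

	return j > 1.0 ? 1.0 : j;
}

int main(void)
{
	/* ~51 ticks over a 100 ms window -> about half loaded */
	printf("%.2f\n", cpuexec_fraction(0.0, 0.1, 51));	/* 0.50 */
	return 0;
}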
@@ -3218,7 +3211,7 @@ class ProcessMonitor:
# markers, and/or kprobes required for primary parsing.
def doesTraceLogHaveTraceEvents():
kpcheck = ['_cal: (', '_ret: (']
- techeck = ['suspend_resume', 'device_pm_callback']
+ techeck = ['suspend_resume', 'device_pm_callback', 'tracing_mark_write']
tmcheck = ['SUSPEND START', 'RESUME COMPLETE']
sysvals.usekprobes = False
fp = sysvals.openlog(sysvals.ftracefile, 'r')
@@ -3241,7 +3234,7 @@ def doesTraceLogHaveTraceEvents():
check.remove(i)
tmcheck = check
fp.close()
- sysvals.usetraceevents = True if len(techeck) < 2 else False
+ sysvals.usetraceevents = True if len(techeck) < 3 else False
sysvals.usetracemarkers = True if len(tmcheck) == 0 else False
# Function: appendIncompleteTraceLog
@@ -3456,6 +3449,8 @@ def parseTraceLog(live=False):
continue
# process cpu exec line
if t.type == 'tracing_mark_write':
+ if t.name == 'CMD COMPLETE' and data.tKernRes == 0:
+ data.tKernRes = t.time
m = re.match(tp.procexecfmt, t.name)
if(m):
parts, msg = 1, m.group('ps')
@@ -3674,6 +3669,9 @@ def parseTraceLog(live=False):
e = next((x for x in reversed(tp.ktemp[key]) if x['end'] < 0), 0)
if not e:
continue
+ if (t.time - e['begin']) * 1000 < sysvals.mindevlen:
+ tp.ktemp[key].pop()
+ continue
e['end'] = t.time
e['rdata'] = kprobedata
# end of kernel resume
@@ -4213,6 +4211,8 @@ def callgraphHTML(sv, hf, num, cg, title, color, devid):
fmt = '<n>(%.3f ms @ '+sv.timeformat+')</n>'
flen = fmt % (line.length*1000, line.time)
if line.isLeaf():
+ if line.length * 1000 < sv.mincglen:
+ continue
hf.write(html_func_leaf.format(line.name, flen))
elif line.freturn:
hf.write(html_func_end)
@@ -4827,14 +4827,11 @@ def createHTML(testruns, testfail):
if('cpuexec' in dev):
for t in sorted(dev['cpuexec']):
start, end = t
- j = float(dev['cpuexec'][t]) / 5
- if j > 1.0:
- j = 1.0
height = '%.3f' % (rowheight/3)
top = '%.3f' % (rowtop + devtl.scaleH + 2*rowheight/3)
left = '%f' % (((start-m0)*100)/mTotal)
width = '%f' % ((end-start)*100/mTotal)
- color = 'rgba(255, 0, 0, %f)' % j
+ color = 'rgba(255, 0, 0, %f)' % dev['cpuexec'][t]
devtl.html += \
html_cpuexec.format(left, top, height, width, color)
if('src' not in dev):
@@ -5453,17 +5450,9 @@ def executeSuspend(quiet=False):
call('sync', shell=True)
sv.dlog('read dmesg')
sv.initdmesg()
- # start ftrace
- if sv.useftrace:
- if not quiet:
- pprint('START TRACING')
- sv.dlog('start ftrace tracing')
- sv.fsetVal('1', 'tracing_on')
- if sv.useprocmon:
- sv.dlog('start the process monitor')
- pm.start()
- sv.dlog('run the cmdinfo list before')
+ sv.dlog('cmdinfo before')
sv.cmdinfo(True)
+ sv.start(pm)
# execute however many s/r runs requested
for count in range(1,sv.execcount+1):
# x2delay in between test runs
@@ -5500,6 +5489,7 @@ def executeSuspend(quiet=False):
if res != 0:
tdata['error'] = 'cmd returned %d' % res
else:
+ s0ixready = sv.s0ixSupport()
mode = sv.suspendmode
if sv.memmode and os.path.exists(sv.mempowerfile):
mode = 'mem'
@@ -5509,9 +5499,10 @@ def executeSuspend(quiet=False):
sv.testVal(sv.diskpowerfile, 'radio', sv.diskmode)
if sv.acpidebug:
sv.testVal(sv.acpipath, 'acpi', '0xe')
- if mode == 'freeze' and sv.haveTurbostat():
+ if ((mode == 'freeze') or (sv.memmode == 's2idle')) \
+ and sv.haveTurbostat():
# execution will pause here
- turbo = sv.turbostat()
+ turbo = sv.turbostat(s0ixready)
if turbo:
tdata['turbo'] = turbo
else:
@@ -5522,7 +5513,8 @@ def executeSuspend(quiet=False):
pf.close()
except Exception as e:
tdata['error'] = str(e)
- sv.dlog('system returned from resume')
+ sv.fsetVal('CMD COMPLETE', 'trace_marker')
+ sv.dlog('system returned')
# reset everything
sv.testVal('restoreall')
if(sv.rtcwake):
@@ -5535,33 +5527,29 @@ def executeSuspend(quiet=False):
sv.fsetVal('WAIT END', 'trace_marker')
# return from suspend
pprint('RESUME COMPLETE')
- sv.fsetVal(datetime.now().strftime(sv.tmend), 'trace_marker')
+ if(count < sv.execcount):
+ sv.fsetVal(datetime.now().strftime(sv.tmend), 'trace_marker')
+ elif(not sv.wifitrace):
+ sv.fsetVal(datetime.now().strftime(sv.tmend), 'trace_marker')
+ sv.stop(pm)
if sv.wifi and wifi:
tdata['wifi'] = sv.pollWifi(wifi)
sv.dlog('wifi check, %s' % tdata['wifi'])
- if sv.netfix:
- netfixout = sv.netfixon('wired')
- elif sv.netfix:
- netfixout = sv.netfixon()
- if sv.netfix and netfixout:
- tdata['netfix'] = netfixout
+ if(count == sv.execcount and sv.wifitrace):
+ sv.fsetVal(datetime.now().strftime(sv.tmend), 'trace_marker')
+ sv.stop(pm)
+ if sv.netfix:
+ tdata['netfix'] = sv.netfixon()
sv.dlog('netfix, %s' % tdata['netfix'])
if(sv.suspendmode == 'mem' or sv.suspendmode == 'command'):
sv.dlog('read the ACPI FPDT')
tdata['fw'] = getFPDT(False)
testdata.append(tdata)
- sv.dlog('run the cmdinfo list after')
+ sv.dlog('cmdinfo after')
cmdafter = sv.cmdinfo(False)
- # stop ftrace
- if sv.useftrace:
- if sv.useprocmon:
- sv.dlog('stop the process monitor')
- pm.stop()
- sv.fsetVal('0', 'tracing_on')
# grab a copy of the dmesg output
if not quiet:
pprint('CAPTURING DMESG')
- sysvals.dlog('EXECUTION TRACE END')
sv.getdmesg(testdata)
# grab a copy of the ftrace output
if sv.useftrace:
@@ -6350,6 +6338,8 @@ def data_from_html(file, outpath, issues, fulldetail=False):
if not m:
continue
name, time, phase = m.group('n'), m.group('t'), m.group('p')
+ if name == 'async_synchronize_full':
+ continue
if ' async' in name or ' sync' in name:
name = ' '.join(name.split(' ')[:-1])
if phase.startswith('suspend'):
@@ -6701,6 +6691,7 @@ def printHelp():
' -skiphtml Run the test and capture the trace logs, but skip the timeline (default: disabled)\n'\
' -result fn Export a results table to a text file for parsing.\n'\
' -wifi If a wifi connection is available, check that it reconnects after resume.\n'\
+ ' -wifitrace Trace kernel execution through wifi reconnect.\n'\
' -netfix Use netfix to reset the network in the event it fails to resume.\n'\
' [testprep]\n'\
' -sync Sync the filesystems before starting the test\n'\
@@ -6828,6 +6819,8 @@ if __name__ == '__main__':
sysvals.sync = True
elif(arg == '-wifi'):
sysvals.wifi = True
+ elif(arg == '-wifitrace'):
+ sysvals.wifitrace = True
elif(arg == '-netfix'):
sysvals.netfix = True
elif(arg == '-gzip'):
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index a072b2d3e726..7edce12fd2ce 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -12,30 +12,62 @@
#include "mock.h"
#define NR_CXL_HOST_BRIDGES 2
+#define NR_CXL_SINGLE_HOST 1
#define NR_CXL_ROOT_PORTS 2
#define NR_CXL_SWITCH_PORTS 2
#define NR_CXL_PORT_DECODERS 8
static struct platform_device *cxl_acpi;
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
-static struct platform_device
- *cxl_root_port[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS];
-static struct platform_device
- *cxl_switch_uport[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS];
-static struct platform_device
- *cxl_switch_dport[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS *
- NR_CXL_SWITCH_PORTS];
-struct platform_device
- *cxl_mem[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS];
+#define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
+static struct platform_device *cxl_root_port[NR_MULTI_ROOT];
+static struct platform_device *cxl_switch_uport[NR_MULTI_ROOT];
+#define NR_MEM_MULTI \
+ (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS)
+static struct platform_device *cxl_switch_dport[NR_MEM_MULTI];
+
+static struct platform_device *cxl_hb_single[NR_CXL_SINGLE_HOST];
+static struct platform_device *cxl_root_single[NR_CXL_SINGLE_HOST];
+static struct platform_device *cxl_swu_single[NR_CXL_SINGLE_HOST];
+#define NR_MEM_SINGLE (NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS)
+static struct platform_device *cxl_swd_single[NR_MEM_SINGLE];
+
+struct platform_device *cxl_mem[NR_MEM_MULTI];
+struct platform_device *cxl_mem_single[NR_MEM_SINGLE];
+
+
+static inline bool is_multi_bridge(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
+ if (&cxl_host_bridge[i]->dev == dev)
+ return true;
+ return false;
+}
+
+static inline bool is_single_bridge(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
+ if (&cxl_hb_single[i]->dev == dev)
+ return true;
+ return false;
+}
static struct acpi_device acpi0017_mock;
-static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES] = {
+static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST] = {
[0] = {
.handle = &host_bridge[0],
},
[1] = {
.handle = &host_bridge[1],
},
+ [2] = {
+ .handle = &host_bridge[2],
+ },
+
};
static bool is_mock_dev(struct device *dev)
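
Because the single-host-bridge topology instantiates the same platform device
names as the multi-bridge one, its ids must start past the multi-bridge ranges
(NR_CXL_HOST_BRIDGES + i, NR_MULTI_ROOT + i, NR_MEM_MULTI + i in
cxl_single_init() below). A standalone sketch of the resulting id layout,
using the constants defined in this file:

#include <stdio.h>

#define NR_CXL_HOST_BRIDGES 2
#define NR_CXL_SINGLE_HOST  1
#define NR_CXL_ROOT_PORTS   2
#define NR_CXL_SWITCH_PORTS 2
#define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
#define NR_MEM_MULTI  (NR_MULTI_ROOT * NR_CXL_SWITCH_PORTS)

int main(void)
{
	printf("multi host bridges:  ids 0..%d\n", NR_CXL_HOST_BRIDGES - 1);
	printf("single host bridge:  id  %d\n", NR_CXL_HOST_BRIDGES);
	printf("multi root ports:    ids 0..%d\n", NR_MULTI_ROOT - 1);
	printf("single root port:    id  %d\n", NR_MULTI_ROOT);
	printf("multi dports/mems:   ids 0..%d\n", NR_MEM_MULTI - 1);
	printf("single dports/mems:  ids %d..%d\n", NR_MEM_MULTI,
	       NR_MEM_MULTI + NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS - 1);
	return 0;
}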
@@ -45,6 +77,9 @@ static bool is_mock_dev(struct device *dev)
for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
if (dev == &cxl_mem[i]->dev)
return true;
+ for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++)
+ if (dev == &cxl_mem_single[i]->dev)
+ return true;
if (dev == &cxl_acpi->dev)
return true;
return false;
@@ -66,7 +101,7 @@ static bool is_mock_adev(struct acpi_device *adev)
static struct {
struct acpi_table_cedt cedt;
- struct acpi_cedt_chbs chbs[NR_CXL_HOST_BRIDGES];
+ struct acpi_cedt_chbs chbs[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST];
struct {
struct acpi_cedt_cfmws cfmws;
u32 target[1];
@@ -83,6 +118,10 @@ static struct {
struct acpi_cedt_cfmws cfmws;
u32 target[2];
} cfmws3;
+ struct {
+ struct acpi_cedt_cfmws cfmws;
+ u32 target[1];
+ } cfmws4;
} __packed mock_cedt = {
.cedt = {
.header = {
@@ -107,6 +146,14 @@ static struct {
.uid = 1,
.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
},
+ .chbs[2] = {
+ .header = {
+ .type = ACPI_CEDT_TYPE_CHBS,
+ .length = sizeof(mock_cedt.chbs[0]),
+ },
+ .uid = 2,
+ .cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
+ },
.cfmws0 = {
.cfmws = {
.header = {
@@ -167,13 +214,29 @@ static struct {
},
.target = { 0, 1, },
},
+ .cfmws4 = {
+ .cfmws = {
+ .header = {
+ .type = ACPI_CEDT_TYPE_CFMWS,
+ .length = sizeof(mock_cedt.cfmws4),
+ },
+ .interleave_ways = 0,
+ .granularity = 4,
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
+ ACPI_CEDT_CFMWS_RESTRICT_PMEM,
+ .qtg_id = 4,
+ .window_size = SZ_256M * 4UL,
+ },
+ .target = { 2 },
+ },
};
-struct acpi_cedt_cfmws *mock_cfmws[4] = {
+struct acpi_cedt_cfmws *mock_cfmws[] = {
[0] = &mock_cedt.cfmws0.cfmws,
[1] = &mock_cedt.cfmws1.cfmws,
[2] = &mock_cedt.cfmws2.cfmws,
[3] = &mock_cedt.cfmws3.cfmws,
+ [4] = &mock_cedt.cfmws4.cfmws,
};
struct cxl_mock_res {
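
cfmws4 targets the extra host bridge with .interleave_ways = 0; in the CEDT
encoding that field is an exponent, so 0 denotes a single-way window with one
target, matching .target = { 2 }. A hedged decoding sketch (the 3/6/12-way
encodings come from the CXL specification and are unused by this mock):

#include <stdio.h>

static unsigned int cfmws_interleave_ways(unsigned char eniw)
{
	if (eniw <= 4)
		return 1U << eniw;	 /* 1, 2, 4, 8, 16 ways */
	if (eniw >= 8 && eniw <= 10)
		return 3U << (eniw - 8); /* 3, 6, 12 ways */
	return 0;			 /* reserved encoding */
}

int main(void)
{
	printf("%u\n", cfmws_interleave_ways(0));	/* cfmws4: 1 way */
	return 0;
}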
@@ -304,6 +367,9 @@ static bool is_mock_bridge(struct device *dev)
for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
if (dev == &cxl_host_bridge[i]->dev)
return true;
+ for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
+ if (dev == &cxl_hb_single[i]->dev)
+ return true;
return false;
}
@@ -326,6 +392,18 @@ static bool is_mock_port(struct device *dev)
if (dev == &cxl_switch_dport[i]->dev)
return true;
+ for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++)
+ if (dev == &cxl_root_single[i]->dev)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++)
+ if (dev == &cxl_swu_single[i]->dev)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++)
+ if (dev == &cxl_swd_single[i]->dev)
+ return true;
+
if (is_cxl_memdev(dev))
return is_mock_dev(dev->parent);
@@ -561,11 +639,31 @@ static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
int i, array_size;
if (port->depth == 1) {
- array_size = ARRAY_SIZE(cxl_root_port);
- array = cxl_root_port;
+ if (is_multi_bridge(port->uport)) {
+ array_size = ARRAY_SIZE(cxl_root_port);
+ array = cxl_root_port;
+ } else if (is_single_bridge(port->uport)) {
+ array_size = ARRAY_SIZE(cxl_root_single);
+ array = cxl_root_single;
+ } else {
+ dev_dbg(&port->dev, "%s: unknown bridge type\n",
+ dev_name(port->uport));
+ return -ENXIO;
+ }
} else if (port->depth == 2) {
- array_size = ARRAY_SIZE(cxl_switch_dport);
- array = cxl_switch_dport;
+ struct cxl_port *parent = to_cxl_port(port->dev.parent);
+
+ if (is_multi_bridge(parent->uport)) {
+ array_size = ARRAY_SIZE(cxl_switch_dport);
+ array = cxl_switch_dport;
+ } else if (is_single_bridge(parent->uport)) {
+ array_size = ARRAY_SIZE(cxl_swd_single);
+ array = cxl_swd_single;
+ } else {
+ dev_dbg(&port->dev, "%s: unknown bridge type\n",
+ dev_name(port->uport));
+ return -ENXIO;
+ }
} else {
dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
port->depth);
@@ -576,8 +674,12 @@ static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
struct platform_device *pdev = array[i];
struct cxl_dport *dport;
- if (pdev->dev.parent != port->uport)
+ if (pdev->dev.parent != port->uport) {
+ dev_dbg(&port->dev, "%s: mismatch parent %s\n",
+ dev_name(port->uport),
+ dev_name(pdev->dev.parent));
continue;
+ }
dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
CXL_RESOURCE_NONE);
@@ -627,6 +729,157 @@ static void mock_companion(struct acpi_device *adev, struct device *dev)
#define SZ_512G (SZ_64G * 8)
#endif
+static __init int cxl_single_init(void)
+{
+ int i, rc;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) {
+ struct acpi_device *adev =
+ &host_bridge[NR_CXL_HOST_BRIDGES + i];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_host_bridge",
+ NR_CXL_HOST_BRIDGES + i);
+ if (!pdev)
+ goto err_bridge;
+
+ mock_companion(adev, &pdev->dev);
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_bridge;
+ }
+
+ cxl_hb_single[i] = pdev;
+ rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
+ "physical_node");
+ if (rc)
+ goto err_bridge;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) {
+ struct platform_device *bridge =
+ cxl_hb_single[i % ARRAY_SIZE(cxl_hb_single)];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_root_port",
+ NR_MULTI_ROOT + i);
+ if (!pdev)
+ goto err_port;
+ pdev->dev.parent = &bridge->dev;
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_port;
+ }
+ cxl_root_single[i] = pdev;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) {
+ struct platform_device *root_port = cxl_root_single[i];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_switch_uport",
+ NR_MULTI_ROOT + i);
+ if (!pdev)
+ goto err_uport;
+ pdev->dev.parent = &root_port->dev;
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_uport;
+ }
+ cxl_swu_single[i] = pdev;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) {
+ struct platform_device *uport =
+ cxl_swu_single[i % ARRAY_SIZE(cxl_swu_single)];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_switch_dport",
+ i + NR_MEM_MULTI);
+ if (!pdev)
+ goto err_dport;
+ pdev->dev.parent = &uport->dev;
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_dport;
+ }
+ cxl_swd_single[i] = pdev;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
+ struct platform_device *dport = cxl_swd_single[i];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
+ if (!pdev)
+ goto err_mem;
+ pdev->dev.parent = &dport->dev;
+ set_dev_node(&pdev->dev, i % 2);
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_mem;
+ }
+ cxl_mem_single[i] = pdev;
+ }
+
+ return 0;
+
+err_mem:
+ for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_mem_single[i]);
+err_dport:
+ for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_swd_single[i]);
+err_uport:
+ for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_swu_single[i]);
+err_port:
+ for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_root_single[i]);
+err_bridge:
+ for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
+ struct platform_device *pdev = cxl_hb_single[i];
+
+ if (!pdev)
+ continue;
+ sysfs_remove_link(&pdev->dev.kobj, "physical_node");
+ platform_device_unregister(cxl_hb_single[i]);
+ }
+
+ return rc;
+}
+
+static void cxl_single_exit(void)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_mem_single[i]);
+ for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_swd_single[i]);
+ for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_swu_single[i]);
+ for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_root_single[i]);
+ for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
+ struct platform_device *pdev = cxl_hb_single[i];
+
+ if (!pdev)
+ continue;
+ sysfs_remove_link(&pdev->dev.kobj, "physical_node");
+ platform_device_unregister(cxl_hb_single[i]);
+ }
+}
+
static __init int cxl_test_init(void)
{
int rc, i;
@@ -695,7 +948,7 @@ static __init int cxl_test_init(void)
pdev = platform_device_alloc("cxl_switch_uport", i);
if (!pdev)
- goto err_port;
+ goto err_uport;
pdev->dev.parent = &root_port->dev;
rc = platform_device_add(pdev);
@@ -713,7 +966,7 @@ static __init int cxl_test_init(void)
pdev = platform_device_alloc("cxl_switch_dport", i);
if (!pdev)
- goto err_port;
+ goto err_dport;
pdev->dev.parent = &uport->dev;
rc = platform_device_add(pdev);
@@ -724,7 +977,6 @@ static __init int cxl_test_init(void)
cxl_switch_dport[i] = pdev;
}
- BUILD_BUG_ON(ARRAY_SIZE(cxl_mem) != ARRAY_SIZE(cxl_switch_dport));
for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
struct platform_device *dport = cxl_switch_dport[i];
struct platform_device *pdev;
@@ -743,9 +995,13 @@ static __init int cxl_test_init(void)
cxl_mem[i] = pdev;
}
+ rc = cxl_single_init();
+ if (rc)
+ goto err_mem;
+
cxl_acpi = platform_device_alloc("cxl_acpi", 0);
if (!cxl_acpi)
- goto err_mem;
+ goto err_single;
mock_companion(&acpi0017_mock, &cxl_acpi->dev);
acpi0017_mock.dev.bus = &platform_bus_type;
@@ -758,6 +1014,8 @@ static __init int cxl_test_init(void)
err_add:
platform_device_put(cxl_acpi);
+err_single:
+ cxl_single_exit();
err_mem:
for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
platform_device_unregister(cxl_mem[i]);
@@ -793,6 +1051,7 @@ static __exit void cxl_test_exit(void)
int i;
platform_device_unregister(cxl_acpi);
+ cxl_single_exit();
for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
platform_device_unregister(cxl_mem[i]);
for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 0464b2c6c1e4..f07aef7c592c 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -49,6 +49,7 @@ TARGETS += net
TARGETS += net/af_unix
TARGETS += net/forwarding
TARGETS += net/mptcp
+TARGETS += net/openvswitch
TARGETS += netfilter
TARGETS += nsfs
TARGETS += pidfd
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index 127b8caa3dc1..24dd6214394e 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -3936,6 +3936,19 @@ static struct btf_raw_test raw_tests[] = {
.err_str = "Invalid type_id",
},
{
+ .descr = "decl_tag test #16, func proto, return type",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), 2), (-1), /* [3] */
+ BTF_FUNC_PROTO_ENC(3, 0), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0local\0tag1"),
+ .btf_load_err = true,
+ .err_str = "Invalid return type",
+},
+{
.descr = "type_tag test #1",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
diff --git a/tools/testing/selftests/bpf/progs/user_ringbuf_success.c b/tools/testing/selftests/bpf/progs/user_ringbuf_success.c
index 099c23d9aa21..b39093dd5715 100644
--- a/tools/testing/selftests/bpf/progs/user_ringbuf_success.c
+++ b/tools/testing/selftests/bpf/progs/user_ringbuf_success.c
@@ -47,14 +47,14 @@ record_sample(struct bpf_dynptr *dynptr, void *context)
if (status) {
bpf_printk("bpf_dynptr_read() failed: %d\n", status);
err = 1;
- return 0;
+ return 1;
}
} else {
sample = bpf_dynptr_data(dynptr, 0, sizeof(*sample));
if (!sample) {
bpf_printk("Unexpectedly failed to get sample\n");
err = 2;
- return 0;
+ return 1;
}
stack_sample = *sample;
}
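
The return-value change matters because of the bpf_user_ringbuf_drain()
callback contract: returning zero asks the helper to keep draining, while a
nonzero return stops after the current sample, so returning 0 on error kept
consuming samples the test had already flagged as bad. A minimal sketch of a
callback honoring that contract:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

static long drain_cb(struct bpf_dynptr *dynptr, void *ctx)
{
	long sample;

	if (bpf_dynptr_read(&sample, sizeof(sample), dynptr, 0, 0))
		return 1;	/* nonzero: stop draining further samples */
	return 0;		/* zero: continue with the next sample */
}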
diff --git a/tools/testing/selftests/drivers/net/bonding/Makefile b/tools/testing/selftests/drivers/net/bonding/Makefile
index e9dab5f9d773..6b8d2e2f23c2 100644
--- a/tools/testing/selftests/drivers/net/bonding/Makefile
+++ b/tools/testing/selftests/drivers/net/bonding/Makefile
@@ -7,6 +7,8 @@ TEST_PROGS := \
bond-lladdr-target.sh \
dev_addr_lists.sh
-TEST_FILES := lag_lib.sh
+TEST_FILES := \
+ lag_lib.sh \
+ net_forwarding_lib.sh
include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh b/tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh
index e6fa24eded5b..5cfe7d8ebc25 100755
--- a/tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh
+++ b/tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh
@@ -14,7 +14,7 @@ ALL_TESTS="
REQUIRE_MZ=no
NUM_NETIFS=0
lib_dir=$(dirname "$0")
-source "$lib_dir"/../../../net/forwarding/lib.sh
+source "$lib_dir"/net_forwarding_lib.sh
source "$lib_dir"/lag_lib.sh
diff --git a/tools/testing/selftests/drivers/net/bonding/net_forwarding_lib.sh b/tools/testing/selftests/drivers/net/bonding/net_forwarding_lib.sh
new file mode 120000
index 000000000000..39c96828c5ef
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/bonding/net_forwarding_lib.sh
@@ -0,0 +1 @@
+../../../net/forwarding/lib.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh b/tools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh
index dca8be6092b9..a1f269ee84da 100755
--- a/tools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh
+++ b/tools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh
@@ -18,8 +18,8 @@ NUM_NETIFS=1
REQUIRE_JQ="no"
REQUIRE_MZ="no"
NETIF_CREATE="no"
-lib_dir=$(dirname $0)/../../../net/forwarding
-source $lib_dir/lib.sh
+lib_dir=$(dirname "$0")
+source "$lib_dir"/lib.sh
cleanup() {
echo "Cleaning up"
diff --git a/tools/testing/selftests/drivers/net/team/Makefile b/tools/testing/selftests/drivers/net/team/Makefile
index 642d8df1c137..6a86e61e8bfe 100644
--- a/tools/testing/selftests/drivers/net/team/Makefile
+++ b/tools/testing/selftests/drivers/net/team/Makefile
@@ -3,4 +3,8 @@
TEST_PROGS := dev_addr_lists.sh
+TEST_FILES := \
+ lag_lib.sh \
+ net_forwarding_lib.sh
+
include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh b/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh
index debda7262956..33913112d5ca 100755
--- a/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh
+++ b/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh
@@ -11,14 +11,14 @@ ALL_TESTS="
REQUIRE_MZ=no
NUM_NETIFS=0
lib_dir=$(dirname "$0")
-source "$lib_dir"/../../../net/forwarding/lib.sh
+source "$lib_dir"/net_forwarding_lib.sh
-source "$lib_dir"/../bonding/lag_lib.sh
+source "$lib_dir"/lag_lib.sh
destroy()
{
- local ifnames=(dummy0 dummy1 team0 mv0)
+ local ifnames=(dummy1 dummy2 team0 mv0)
local ifname
for ifname in "${ifnames[@]}"; do
diff --git a/tools/testing/selftests/drivers/net/team/lag_lib.sh b/tools/testing/selftests/drivers/net/team/lag_lib.sh
new file mode 120000
index 000000000000..e1347a10afde
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/team/lag_lib.sh
@@ -0,0 +1 @@
+../bonding/lag_lib.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/team/net_forwarding_lib.sh b/tools/testing/selftests/drivers/net/team/net_forwarding_lib.sh
new file mode 120000
index 000000000000..39c96828c5ef
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/team/net_forwarding_lib.sh
@@ -0,0 +1 @@
+../../../net/forwarding/lib.sh \ No newline at end of file
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc b/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
index db522577ff78..d3a79da215c8 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
@@ -1,7 +1,7 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: Generic dynamic event - check if duplicate events are caught
-# requires: dynamic_events "e[:[<group>/]<event>] <attached-group>.<attached-event> [<args>]":README
+# requires: dynamic_events "e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]":README
echo 0 > events/enable
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-eprobe.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-eprobe.tc
index 914fe2e5d030..6461c375694f 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-eprobe.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-eprobe.tc
@@ -1,7 +1,7 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test inter-event histogram trigger eprobe on synthetic event
-# requires: dynamic_events synthetic_events events/syscalls/sys_enter_openat/hist "e[:[<group>/]<event>] <attached-group>.<attached-event> [<args>]":README
+# requires: dynamic_events synthetic_events events/syscalls/sys_enter_openat/hist "e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]":README
echo 0 > events/enable
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index 732149011692..5a0e0df8de9b 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -3,11 +3,11 @@ INCLUDES := -I../include -I../../ -I../../../../../usr/include/
CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES) $(KHDR_INCLUDES)
LDLIBS := -lpthread -lrt
-HEADERS := \
+LOCAL_HDRS := \
../include/futextest.h \
../include/atomic.h \
../include/logging.h
-TEST_GEN_FILES := \
+TEST_GEN_PROGS := \
futex_wait_timeout \
futex_wait_wouldblock \
futex_requeue_pi \
@@ -24,5 +24,3 @@ TEST_PROGS := run.sh
top_srcdir = ../../../../..
DEFAULT_INSTALL_HDR_PATH := 1
include ../../lib.mk
-
-$(TEST_GEN_FILES): $(HEADERS)
diff --git a/tools/testing/selftests/intel_pstate/Makefile b/tools/testing/selftests/intel_pstate/Makefile
index 39f0fa2a8fd6..05d66ef50c97 100644
--- a/tools/testing/selftests/intel_pstate/Makefile
+++ b/tools/testing/selftests/intel_pstate/Makefile
@@ -2,10 +2,10 @@
CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE
LDLIBS += -lm
-uname_M := $(shell uname -m 2>/dev/null || echo not)
-ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
+ARCH ?= $(shell uname -m 2>/dev/null || echo not)
+ARCH_PROCESSED := $(shell echo $(ARCH) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
-ifeq (x86,$(ARCH))
+ifeq (x86,$(ARCH_PROCESSED))
TEST_GEN_FILES := msr aperf
endif
diff --git a/tools/testing/selftests/kexec/Makefile b/tools/testing/selftests/kexec/Makefile
index 806a150648c3..67fe7a46cb62 100644
--- a/tools/testing/selftests/kexec/Makefile
+++ b/tools/testing/selftests/kexec/Makefile
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
# Makefile for kexec tests
-uname_M := $(shell uname -m 2>/dev/null || echo not)
-ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
+ARCH ?= $(shell uname -m 2>/dev/null || echo not)
+ARCH_PROCESSED := $(shell echo $(ARCH) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
-ifeq ($(ARCH),$(filter $(ARCH),x86 ppc64le))
+ifeq ($(ARCH_PROCESSED),$(filter $(ARCH_PROCESSED),x86 ppc64le))
TEST_PROGS := test_kexec_load.sh test_kexec_file_load.sh
TEST_FILES := kexec_common_lib.sh
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 2f0d705db9db..082855d94c72 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -16,16 +16,18 @@
/x86_64/cpuid_test
/x86_64/cr4_cpuid_sync_test
/x86_64/debug_regs
-/x86_64/evmcs_test
-/x86_64/emulator_error_test
+/x86_64/exit_on_emulation_failure_test
/x86_64/fix_hypercall_test
/x86_64/get_msr_index_features
/x86_64/kvm_clock_test
/x86_64/kvm_pv_test
/x86_64/hyperv_clock
/x86_64/hyperv_cpuid
+/x86_64/hyperv_evmcs
/x86_64/hyperv_features
+/x86_64/hyperv_ipi
/x86_64/hyperv_svm_test
+/x86_64/hyperv_tlb_flush
/x86_64/max_vcpuid_cap_test
/x86_64/mmio_warning_test
/x86_64/monitor_mwait_test
@@ -36,11 +38,13 @@
/x86_64/set_boot_cpu_id
/x86_64/set_sregs_test
/x86_64/sev_migrate_tests
+/x86_64/smaller_maxphyaddr_emulation_test
/x86_64/smm_test
/x86_64/state_test
/x86_64/svm_vmcall_test
/x86_64/svm_int_ctl_test
/x86_64/svm_nested_soft_inject_test
+/x86_64/svm_nested_shutdown_test
/x86_64/sync_regs_test
/x86_64/tsc_msrs_test
/x86_64/tsc_scaling_sync
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 0172eb6cb6ee..2275ba861e0e 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -43,16 +43,18 @@ LIBKVM += lib/elf.c
LIBKVM += lib/guest_modes.c
LIBKVM += lib/io.c
LIBKVM += lib/kvm_util.c
-LIBKVM += lib/perf_test_util.c
+LIBKVM += lib/memstress.c
LIBKVM += lib/rbtree.c
LIBKVM += lib/sparsebit.c
LIBKVM += lib/test_util.c
+LIBKVM += lib/ucall_common.c
LIBKVM_STRING += lib/string_override.c
LIBKVM_x86_64 += lib/x86_64/apic.c
LIBKVM_x86_64 += lib/x86_64/handlers.S
-LIBKVM_x86_64 += lib/x86_64/perf_test_util.c
+LIBKVM_x86_64 += lib/x86_64/hyperv.c
+LIBKVM_x86_64 += lib/x86_64/memstress.c
LIBKVM_x86_64 += lib/x86_64/processor.c
LIBKVM_x86_64 += lib/x86_64/svm.c
LIBKVM_x86_64 += lib/x86_64/ucall.c
@@ -80,13 +82,15 @@ TEST_PROGS_x86_64 += x86_64/nx_huge_pages_test.sh
TEST_GEN_PROGS_x86_64 = x86_64/cpuid_test
TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86_64 += x86_64/get_msr_index_features
-TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
-TEST_GEN_PROGS_x86_64 += x86_64/emulator_error_test
+TEST_GEN_PROGS_x86_64 += x86_64/exit_on_emulation_failure_test
TEST_GEN_PROGS_x86_64 += x86_64/fix_hypercall_test
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_clock
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
+TEST_GEN_PROGS_x86_64 += x86_64/hyperv_evmcs
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_features
+TEST_GEN_PROGS_x86_64 += x86_64/hyperv_ipi
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_svm_test
+TEST_GEN_PROGS_x86_64 += x86_64/hyperv_tlb_flush
TEST_GEN_PROGS_x86_64 += x86_64/kvm_clock_test
TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test
TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
@@ -96,11 +100,13 @@ TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
TEST_GEN_PROGS_x86_64 += x86_64/pmu_event_filter_test
TEST_GEN_PROGS_x86_64 += x86_64/set_boot_cpu_id
TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
+TEST_GEN_PROGS_x86_64 += x86_64/smaller_maxphyaddr_emulation_test
TEST_GEN_PROGS_x86_64 += x86_64/smm_test
TEST_GEN_PROGS_x86_64 += x86_64/state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
+TEST_GEN_PROGS_x86_64 += x86_64/svm_nested_shutdown_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_nested_soft_inject_test
TEST_GEN_PROGS_x86_64 += x86_64/tsc_scaling_sync
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
diff --git a/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c b/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c
index 6f9c1f19c7f6..b1d2158c0b6d 100644
--- a/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c
+++ b/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c
@@ -158,12 +158,9 @@ int main(void)
TEST_REQUIRE(vcpu_aarch64_only(vcpu));
- ucall_init(vm, NULL);
-
test_user_raz_wi(vcpu);
test_user_raz_invariant(vcpu);
test_guest_raz(vcpu);
- ucall_uninit(vm);
kvm_vm_free(vm);
}
diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c
index 574eb73f0e90..f2a96779716a 100644
--- a/tools/testing/selftests/kvm/aarch64/arch_timer.c
+++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c
@@ -375,7 +375,6 @@ static struct kvm_vm *test_vm_create(void)
for (i = 0; i < nr_vcpus; i++)
vcpu_init_descriptor_tables(vcpus[i]);
- ucall_init(vm, NULL);
test_init_timer_irq(vm);
gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
__TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3");
@@ -414,36 +413,21 @@ static bool parse_args(int argc, char *argv[])
while ((opt = getopt(argc, argv, "hn:i:p:m:")) != -1) {
switch (opt) {
case 'n':
- test_args.nr_vcpus = atoi(optarg);
- if (test_args.nr_vcpus <= 0) {
- pr_info("Positive value needed for -n\n");
- goto err;
- } else if (test_args.nr_vcpus > KVM_MAX_VCPUS) {
+ test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg);
+ if (test_args.nr_vcpus > KVM_MAX_VCPUS) {
pr_info("Max allowed vCPUs: %u\n",
KVM_MAX_VCPUS);
goto err;
}
break;
case 'i':
- test_args.nr_iter = atoi(optarg);
- if (test_args.nr_iter <= 0) {
- pr_info("Positive value needed for -i\n");
- goto err;
- }
+ test_args.nr_iter = atoi_positive("Number of iterations", optarg);
break;
case 'p':
- test_args.timer_period_ms = atoi(optarg);
- if (test_args.timer_period_ms <= 0) {
- pr_info("Positive value needed for -p\n");
- goto err;
- }
+ test_args.timer_period_ms = atoi_positive("Periodicity", optarg);
break;
case 'm':
- test_args.migration_freq_ms = atoi(optarg);
- if (test_args.migration_freq_ms < 0) {
- pr_info("0 or positive value needed for -m\n");
- goto err;
- }
+ test_args.migration_freq_ms = atoi_non_negative("Frequency", optarg);
break;
case 'h':
default:
@@ -462,9 +446,6 @@ int main(int argc, char *argv[])
{
struct kvm_vm *vm;
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
if (!parse_args(argc, argv))
exit(KSFT_SKIP);
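
These conversions rely on new selftest helpers replacing bare atoi(): judging
from the call sites, atoi_paranoid() rejects input that does not parse
cleanly, and atoi_positive()/atoi_non_negative() additionally enforce the
range, reporting the supplied description on failure. A rough standalone
sketch of those semantics (an assumption, not the library's implementation):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

static int atoi_positive_sketch(const char *name, const char *s)
{
	char *end;
	long v = strtol(s, &end, 0);

	assert(*s != '\0' && *end == '\0');	/* whole string must parse */
	if (v <= 0) {
		fprintf(stderr, "%s must be positive, got %ld\n", name, v);
		exit(1);
	}
	return (int)v;
}

int main(void)
{
	printf("%d\n", atoi_positive_sketch("Number of vCPUs", "4"));
	return 0;
}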
diff --git a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
index 947bd201435c..d86c4e4d1c82 100644
--- a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
+++ b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
@@ -241,7 +241,6 @@ static void guest_svc_handler(struct ex_regs *regs)
enum single_step_op {
SINGLE_STEP_ENABLE = 0,
- SINGLE_STEP_DISABLE = 1,
};
static void guest_code_ss(int test_cnt)
@@ -258,7 +257,7 @@ static void guest_code_ss(int test_cnt)
GUEST_SYNC(SINGLE_STEP_ENABLE);
/*
- * The userspace will veriry that the pc is as expected during
+ * The userspace will verify that the pc is as expected during
* single step execution between iter_ss_begin and iter_ss_end.
*/
asm volatile("iter_ss_begin:nop\n");
@@ -268,11 +267,9 @@ static void guest_code_ss(int test_cnt)
bvr = read_sysreg(dbgbvr0_el1);
wvr = read_sysreg(dbgwvr0_el1);
+ /* Userspace disables Single Step when the end is nigh. */
asm volatile("iter_ss_end:\n");
- /* Disable Single Step execution */
- GUEST_SYNC(SINGLE_STEP_DISABLE);
-
GUEST_ASSERT(bvr == w_bvr);
GUEST_ASSERT(wvr == w_wvr);
}
@@ -295,7 +292,6 @@ static void test_guest_debug_exceptions(void)
int stage;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- ucall_init(vm, NULL);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
@@ -346,7 +342,6 @@ void test_single_step_from_userspace(int test_cnt)
struct kvm_guest_debug debug = {};
vm = vm_create_with_one_vcpu(&vcpu, guest_code_ss);
- ucall_init(vm, NULL);
run = vcpu->run;
vcpu_args_set(vcpu, 1, test_cnt);
@@ -364,15 +359,12 @@ void test_single_step_from_userspace(int test_cnt)
TEST_ASSERT(cmd == UCALL_SYNC,
"Unexpected ucall cmd 0x%lx", cmd);
- if (uc.args[1] == SINGLE_STEP_ENABLE) {
- debug.control = KVM_GUESTDBG_ENABLE |
- KVM_GUESTDBG_SINGLESTEP;
- ss_enable = true;
- } else {
- debug.control = SINGLE_STEP_DISABLE;
- ss_enable = false;
- }
+ TEST_ASSERT(uc.args[1] == SINGLE_STEP_ENABLE,
+ "Unexpected ucall action 0x%lx", uc.args[1]);
+ debug.control = KVM_GUESTDBG_ENABLE |
+ KVM_GUESTDBG_SINGLESTEP;
+ ss_enable = true;
vcpu_guest_debug_set(vcpu, &debug);
continue;
}
@@ -385,6 +377,14 @@ void test_single_step_from_userspace(int test_cnt)
"Unexpected pc 0x%lx (expected 0x%lx)",
pc, test_pc);
+ if ((pc + 4) == (uint64_t)&iter_ss_end) {
+ test_pc = 0;
+ debug.control = KVM_GUESTDBG_ENABLE;
+ ss_enable = false;
+ vcpu_guest_debug_set(vcpu, &debug);
+ continue;
+ }
+
/*
* If the current pc is between iter_ss_begin and
* iter_ss_end, the pc for the next KVM_EXIT_DEBUG should
@@ -423,7 +423,7 @@ int main(int argc, char *argv[])
while ((opt = getopt(argc, argv, "i:")) != -1) {
switch (opt) {
case 'i':
- ss_iteration = atoi(optarg);
+ ss_iteration = atoi_positive("Number of iterations", optarg);
break;
case 'h':
default:
diff --git a/tools/testing/selftests/kvm/aarch64/hypercalls.c b/tools/testing/selftests/kvm/aarch64/hypercalls.c
index a39da3fe4952..bef1499fb465 100644
--- a/tools/testing/selftests/kvm/aarch64/hypercalls.c
+++ b/tools/testing/selftests/kvm/aarch64/hypercalls.c
@@ -236,7 +236,6 @@ static struct kvm_vm *test_vm_create(struct kvm_vcpu **vcpu)
vm = vm_create_with_one_vcpu(vcpu, guest_code);
- ucall_init(vm, NULL);
steal_time_init(*vcpu);
return vm;
@@ -306,8 +305,6 @@ static void test_run(void)
int main(void)
{
- setbuf(stdout, NULL);
-
test_run();
return 0;
}
diff --git a/tools/testing/selftests/kvm/aarch64/psci_test.c b/tools/testing/selftests/kvm/aarch64/psci_test.c
index e0b9e81a3e09..cfa36f387948 100644
--- a/tools/testing/selftests/kvm/aarch64/psci_test.c
+++ b/tools/testing/selftests/kvm/aarch64/psci_test.c
@@ -79,7 +79,6 @@ static struct kvm_vm *setup_vm(void *guest_code, struct kvm_vcpu **source,
struct kvm_vm *vm;
vm = vm_create(2);
- ucall_init(vm, NULL);
vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);
diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c
index 9c131d977a1b..eef816b80993 100644
--- a/tools/testing/selftests/kvm/aarch64/vgic_init.c
+++ b/tools/testing/selftests/kvm/aarch64/vgic_init.c
@@ -68,8 +68,6 @@ static void guest_code(void)
/* we don't want to assert on run execution, hence that helper */
static int run_vcpu(struct kvm_vcpu *vcpu)
{
- ucall_init(vcpu->vm, NULL);
-
return __vcpu_run(vcpu) ? -errno : 0;
}
diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c
index 17417220a083..90d854e0fcff 100644
--- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c
+++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c
@@ -756,7 +756,6 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
print_args(&args);
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- ucall_init(vm, NULL);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
@@ -818,22 +817,19 @@ int main(int argc, char **argv)
int opt;
bool eoi_split = false;
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
while ((opt = getopt(argc, argv, "hn:e:l:")) != -1) {
switch (opt) {
case 'n':
- nr_irqs = atoi(optarg);
+ nr_irqs = atoi_non_negative("Number of IRQs", optarg);
if (nr_irqs > 1024 || nr_irqs % 32)
help(argv[0]);
break;
case 'e':
- eoi_split = (bool)atoi(optarg);
+ eoi_split = (bool)atoi_paranoid(optarg);
default_args = false;
break;
case 'l':
- level_sensitive = (bool)atoi(optarg);
+ level_sensitive = (bool)atoi_paranoid(optarg);
default_args = false;
break;
case 'h':
diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c
index 76c583a07ea2..02d3587cab0a 100644
--- a/tools/testing/selftests/kvm/access_tracking_perf_test.c
+++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c
@@ -44,7 +44,7 @@
#include "kvm_util.h"
#include "test_util.h"
-#include "perf_test_util.h"
+#include "memstress.h"
#include "guest_modes.h"
/* Global variable used to synchronize all of the vCPU threads. */
@@ -126,7 +126,7 @@ static void mark_page_idle(int page_idle_fd, uint64_t pfn)
}
static void mark_vcpu_memory_idle(struct kvm_vm *vm,
- struct perf_test_vcpu_args *vcpu_args)
+ struct memstress_vcpu_args *vcpu_args)
{
int vcpu_idx = vcpu_args->vcpu_idx;
uint64_t base_gva = vcpu_args->gva;
@@ -148,7 +148,7 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm,
TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap.");
for (page = 0; page < pages; page++) {
- uint64_t gva = base_gva + page * perf_test_args.guest_page_size;
+ uint64_t gva = base_gva + page * memstress_args.guest_page_size;
uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);
if (!pfn) {
@@ -220,10 +220,10 @@ static bool spin_wait_for_next_iteration(int *current_iteration)
return true;
}
-static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args)
+static void vcpu_thread_main(struct memstress_vcpu_args *vcpu_args)
{
struct kvm_vcpu *vcpu = vcpu_args->vcpu;
- struct kvm_vm *vm = perf_test_args.vm;
+ struct kvm_vm *vm = memstress_args.vm;
int vcpu_idx = vcpu_args->vcpu_idx;
int current_iteration = 0;
@@ -279,7 +279,7 @@ static void run_iteration(struct kvm_vm *vm, int nr_vcpus, const char *descripti
static void access_memory(struct kvm_vm *vm, int nr_vcpus,
enum access_type access, const char *description)
{
- perf_test_set_wr_fract(vm, (access == ACCESS_READ) ? INT_MAX : 1);
+ memstress_set_write_percent(vm, (access == ACCESS_READ) ? 0 : 100);
iteration_work = ITERATION_ACCESS_MEMORY;
run_iteration(vm, nr_vcpus, description);
}
@@ -303,10 +303,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct kvm_vm *vm;
int nr_vcpus = params->nr_vcpus;
- vm = perf_test_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
+ vm = memstress_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
params->backing_src, !overlap_memory_access);
- perf_test_start_vcpu_threads(nr_vcpus, vcpu_thread_main);
+ memstress_start_vcpu_threads(nr_vcpus, vcpu_thread_main);
pr_info("\n");
access_memory(vm, nr_vcpus, ACCESS_WRITE, "Populating memory");
@@ -324,8 +324,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
/* Set done to signal the vCPU threads to exit */
done = true;
- perf_test_join_vcpu_threads(nr_vcpus);
- perf_test_destroy_vm(vm);
+ memstress_join_vcpu_threads(nr_vcpus);
+ memstress_destroy_vm(vm);
}
static void help(char *name)
@@ -368,7 +368,7 @@ int main(int argc, char *argv[])
params.vcpu_memory_bytes = parse_size(optarg);
break;
case 'v':
- params.nr_vcpus = atoi(optarg);
+ params.nr_vcpus = atoi_positive("Number of vCPUs", optarg);
break;
case 'o':
overlap_memory_access = true;
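
Alongside the perf_test_* to memstress_* rename, the write knob changes units:
the old wr_fract meant "one write per N accesses" (INT_MAX was effectively
read-only, 1 was all writes), while write_percent is 0-100, which is why
INT_MAX and 1 above become 0 and 100. An approximate conversion, assuming the
old one-in-N semantics:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t wr_fract_to_write_percent(int wr_fract)
{
	return wr_fract == INT_MAX ? 0 : (uint32_t)(100 / wr_fract);
}

int main(void)
{
	printf("%u %u %u\n",
	       wr_fract_to_write_percent(INT_MAX),	/* 0 */
	       wr_fract_to_write_percent(2),		/* 50 */
	       wr_fract_to_write_percent(1));		/* 100 */
	return 0;
}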
diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index 779ae54f89c4..3a977ddf07b2 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -20,7 +20,7 @@
#include "kvm_util.h"
#include "test_util.h"
-#include "perf_test_util.h"
+#include "memstress.h"
#include "guest_modes.h"
#ifdef __NR_userfaultfd
@@ -42,7 +42,7 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static size_t demand_paging_size;
static char *guest_data_prototype;
-static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
+static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
struct kvm_vcpu *vcpu = vcpu_args->vcpu;
int vcpu_idx = vcpu_args->vcpu_idx;
@@ -285,7 +285,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct kvm_vm *vm;
int r, i;
- vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
+ vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
p->src_type, p->partition_vcpu_memory_access);
demand_paging_size = get_backing_src_pagesz(p->src_type);
@@ -307,11 +307,11 @@ static void run_test(enum vm_guest_mode mode, void *arg)
TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd");
for (i = 0; i < nr_vcpus; i++) {
- struct perf_test_vcpu_args *vcpu_args;
+ struct memstress_vcpu_args *vcpu_args;
void *vcpu_hva;
void *vcpu_alias;
- vcpu_args = &perf_test_args.vcpu_args[i];
+ vcpu_args = &memstress_args.vcpu_args[i];
/* Cache the host addresses of the region */
vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa);
@@ -329,17 +329,17 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pipefds[i * 2], p->uffd_mode,
p->uffd_delay, &uffd_args[i],
vcpu_hva, vcpu_alias,
- vcpu_args->pages * perf_test_args.guest_page_size);
+ vcpu_args->pages * memstress_args.guest_page_size);
}
}
pr_info("Finished creating vCPUs and starting uffd threads\n");
clock_gettime(CLOCK_MONOTONIC, &start);
- perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
+ memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
pr_info("Started all vCPUs\n");
- perf_test_join_vcpu_threads(nr_vcpus);
+ memstress_join_vcpu_threads(nr_vcpus);
ts_diff = timespec_elapsed(start);
pr_info("All vCPU threads joined\n");
@@ -358,10 +358,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("Total guest execution time: %ld.%.9lds\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
pr_info("Overall demand paging rate: %f pgs/sec\n",
- perf_test_args.vcpu_args[0].pages * nr_vcpus /
+ memstress_args.vcpu_args[0].pages * nr_vcpus /
((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 1000000000.0));
- perf_test_destroy_vm(vm);
+ memstress_destroy_vm(vm);
free(guest_data_prototype);
if (p->uffd_mode) {
@@ -427,8 +427,8 @@ int main(int argc, char *argv[])
p.src_type = parse_backing_src_type(optarg);
break;
case 'v':
- nr_vcpus = atoi(optarg);
- TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
+ nr_vcpus = atoi_positive("Number of vCPUs", optarg);
+ TEST_ASSERT(nr_vcpus <= max_vcpus,
"Invalid number of vcpus, must be between 1 and %d", max_vcpus);
break;
case 'o':
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index f99e39a672d3..c33e89012ae6 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -16,7 +16,7 @@
#include "kvm_util.h"
#include "test_util.h"
-#include "perf_test_util.h"
+#include "memstress.h"
#include "guest_modes.h"
#ifdef __aarch64__
@@ -67,7 +67,7 @@ static bool host_quit;
static int iteration;
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
-static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
+static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
struct kvm_vcpu *vcpu = vcpu_args->vcpu;
int vcpu_idx = vcpu_args->vcpu_idx;
@@ -128,10 +128,12 @@ static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
struct test_params {
unsigned long iterations;
uint64_t phys_offset;
- int wr_fract;
bool partition_vcpu_memory_access;
enum vm_mem_backing_src_type backing_src;
int slots;
+ uint32_t write_percent;
+ uint32_t random_seed;
+ bool random_access;
};
static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
@@ -139,7 +141,7 @@ static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
int i;
for (i = 0; i < slots; i++) {
- int slot = PERF_TEST_MEM_SLOT_INDEX + i;
+ int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;
vm_mem_region_set_flags(vm, slot, flags);
@@ -161,7 +163,7 @@ static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots
int i;
for (i = 0; i < slots; i++) {
- int slot = PERF_TEST_MEM_SLOT_INDEX + i;
+ int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
}
@@ -173,7 +175,7 @@ static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
int i;
for (i = 0; i < slots; i++) {
- int slot = PERF_TEST_MEM_SLOT_INDEX + i;
+ int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
}
@@ -221,11 +223,13 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct timespec clear_dirty_log_total = (struct timespec){0};
int i;
- vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
+ vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
p->slots, p->backing_src,
p->partition_vcpu_memory_access);
- perf_test_set_wr_fract(vm, p->wr_fract);
+ pr_info("Random seed: %u\n", p->random_seed);
+ memstress_set_random_seed(vm, p->random_seed);
+ memstress_set_write_percent(vm, p->write_percent);
guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift;
guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
@@ -248,7 +252,16 @@ static void run_test(enum vm_guest_mode mode, void *arg)
for (i = 0; i < nr_vcpus; i++)
vcpu_last_completed_iteration[i] = -1;
- perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
+ /*
+ * Use 100% writes during the population phase to ensure all
+ * memory is actually populated and not just mapped to the zero
+ * page. This prevents expensive copy-on-write faults from
+ * occurring during the dirty memory iterations below, which
+ * would pollute the performance results.
+ */
+ memstress_set_write_percent(vm, 100);
+ memstress_set_random_access(vm, false);
+ memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
/* Allow the vCPUs to populate memory */
pr_debug("Starting iteration %d - Populating\n", iteration);
@@ -269,6 +282,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
+ memstress_set_write_percent(vm, p->write_percent);
+ memstress_set_random_access(vm, p->random_access);
+
while (iteration < p->iterations) {
/*
* Incrementing the iteration number will start the vCPUs
@@ -329,7 +345,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
* wait for them to exit.
*/
host_quit = true;
- perf_test_join_vcpu_threads(nr_vcpus);
+ memstress_join_vcpu_threads(nr_vcpus);
avg = timespec_div(get_dirty_log_total, p->iterations);
pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
@@ -345,16 +361,17 @@ static void run_test(enum vm_guest_mode mode, void *arg)
free_bitmaps(bitmaps, p->slots);
arch_cleanup_vm(vm);
- perf_test_destroy_vm(vm);
+ memstress_destroy_vm(vm);
}
static void help(char *name)
{
puts("");
- printf("usage: %s [-h] [-i iterations] [-p offset] [-g] "
- "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-s mem type]"
- "[-x memslots]\n", name);
+ printf("usage: %s [-h] [-a] [-i iterations] [-p offset] [-g] "
+ "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-r random seed ] [-s mem type]"
+ "[-x memslots] [-w percentage] [-c physical cpus to run test on]\n", name);
puts("");
+ printf(" -a: access memory randomly rather than in order.\n");
printf(" -i: specify iteration counts (default: %"PRIu64")\n",
TEST_HOST_LOOP_N);
printf(" -g: Do not enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. This\n"
@@ -373,16 +390,29 @@ static void help(char *name)
printf(" -b: specify the size of the memory region which should be\n"
" dirtied by each vCPU. e.g. 10M or 3G.\n"
" (default: 1G)\n");
- printf(" -f: specify the fraction of pages which should be written to\n"
- " as opposed to simply read, in the form\n"
- " 1/<fraction of pages to write>.\n"
- " (default: 1 i.e. all pages are written to.)\n");
printf(" -v: specify the number of vCPUs to run.\n");
printf(" -o: Overlap guest memory accesses instead of partitioning\n"
" them into a separate region of memory for each vCPU.\n");
+ printf(" -r: specify the starting random seed.\n");
backing_src_help("-s");
printf(" -x: Split the memory region into this number of memslots.\n"
" (default: 1)\n");
+ printf(" -w: specify the percentage of pages which should be written to\n"
+ " as an integer from 0-100 inclusive. This is probabalistic,\n"
+ " so -w X means each page has an X%% chance of writing\n"
+ " and a (100-X)%% chance of reading.\n"
+ " (default: 100 i.e. all pages are written to.)\n");
+ printf(" -c: Pin tasks to physical CPUs. Takes a list of comma separated\n"
+ " values (target pCPU), one for each vCPU, plus an optional\n"
+ " entry for the main application task (specified via entry\n"
+ " <nr_vcpus + 1>). If used, entries must be provided for all\n"
+ " vCPUs, i.e. pinning vCPUs is all or nothing.\n\n"
+ " E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n"
+ " vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n"
+ " ./dirty_log_perf_test -v 3 -c 22,23,24,50\n\n"
+ " To leave the application task unpinned, drop the final entry:\n\n"
+ " ./dirty_log_perf_test -v 3 -c 22,23,24\n\n"
+ " (default: no pinning)\n");
puts("");
exit(0);
}
@@ -390,12 +420,14 @@ static void help(char *name)
int main(int argc, char *argv[])
{
int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
+ const char *pcpu_list = NULL;
struct test_params p = {
.iterations = TEST_HOST_LOOP_N,
- .wr_fract = 1,
.partition_vcpu_memory_access = true,
.backing_src = DEFAULT_VM_MEM_SRC,
.slots = 1,
+ .random_seed = 1,
+ .write_percent = 100,
};
int opt;
@@ -406,55 +438,73 @@ int main(int argc, char *argv[])
guest_modes_append_default();
- while ((opt = getopt(argc, argv, "eghi:p:m:nb:f:v:os:x:")) != -1) {
+ while ((opt = getopt(argc, argv, "ab:c:eghi:m:nop:r:s:v:x:w:")) != -1) {
switch (opt) {
+ case 'a':
+ p.random_access = true;
+ break;
+ case 'b':
+ guest_percpu_mem_size = parse_size(optarg);
+ break;
+ case 'c':
+ pcpu_list = optarg;
+ break;
case 'e':
/* 'e' is for evil. */
run_vcpus_while_disabling_dirty_logging = true;
+ break;
case 'g':
dirty_log_manual_caps = 0;
break;
- case 'i':
- p.iterations = atoi(optarg);
+ case 'h':
+ help(argv[0]);
break;
- case 'p':
- p.phys_offset = strtoull(optarg, NULL, 0);
+ case 'i':
+ p.iterations = atoi_positive("Number of iterations", optarg);
break;
case 'm':
guest_modes_cmdline(optarg);
break;
case 'n':
- perf_test_args.nested = true;
- break;
- case 'b':
- guest_percpu_mem_size = parse_size(optarg);
- break;
- case 'f':
- p.wr_fract = atoi(optarg);
- TEST_ASSERT(p.wr_fract >= 1,
- "Write fraction cannot be less than one");
- break;
- case 'v':
- nr_vcpus = atoi(optarg);
- TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
- "Invalid number of vcpus, must be between 1 and %d", max_vcpus);
+ memstress_args.nested = true;
break;
case 'o':
p.partition_vcpu_memory_access = false;
break;
+ case 'p':
+ p.phys_offset = strtoull(optarg, NULL, 0);
+ break;
+ case 'r':
+ p.random_seed = atoi_positive("Random seed", optarg);
+ break;
case 's':
p.backing_src = parse_backing_src_type(optarg);
break;
+ case 'v':
+ nr_vcpus = atoi_positive("Number of vCPUs", optarg);
+ TEST_ASSERT(nr_vcpus <= max_vcpus,
+ "Invalid number of vcpus, must be between 1 and %d", max_vcpus);
+ break;
+ case 'w':
+ p.write_percent = atoi_non_negative("Write percentage", optarg);
+ TEST_ASSERT(p.write_percent <= 100,
+ "Write percentage must be between 0 and 100");
+ break;
case 'x':
- p.slots = atoi(optarg);
+ p.slots = atoi_positive("Number of slots", optarg);
break;
- case 'h':
default:
help(argv[0]);
break;
}
}
+ if (pcpu_list) {
+ kvm_parse_vcpu_pinning(pcpu_list, memstress_args.vcpu_to_pcpu,
+ nr_vcpus);
+ memstress_args.pin_vcpus = true;
+ }
+
TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");
pr_info("Test iterations: %"PRIu64"\n", p.iterations);
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index b5234d6efbe1..a38c4369fb8e 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -756,8 +756,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
/* Cache the HVA pointer of the region */
host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
- ucall_init(vm, NULL);
-
/* Export the shared variables to the guest */
sync_global_to_guest(vm, host_page_size);
sync_global_to_guest(vm, guest_page_size);
@@ -813,7 +811,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
free(bmap);
free(host_bmap_track);
- ucall_uninit(vm);
kvm_vm_free(vm);
}
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index e42a09cd24a0..c7685c7038ff 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -16,6 +16,7 @@
#include <linux/kvm.h>
#include "linux/rbtree.h"
+#include <asm/atomic.h>
#include <sys/ioctl.h>
@@ -81,6 +82,7 @@ struct kvm_vm {
struct sparsebit *vpages_mapped;
bool has_irqchip;
bool pgd_created;
+ vm_paddr_t ucall_mmio_addr;
vm_paddr_t pgd;
vm_vaddr_t gdt;
vm_vaddr_t tss;
@@ -383,6 +385,7 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
+vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
@@ -688,6 +691,10 @@ static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
+void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
+void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
+ int nr_vcpus);
+
unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
@@ -718,6 +725,19 @@ kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
memcpy(&(g), _p, sizeof(g)); \
})
+/*
+ * Write a global value, but only in the VM's (guest's) domain. Primarily used
+ * for "globals" that hold per-VM values (VMs always duplicate code and global
+ * data into their own region of physical memory), but can be used anytime it's
+ * undesirable to change the host's copy of the global.
+ */
+#define write_guest_global(vm, g, val) ({ \
+ typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
+ typeof(g) _val = val; \
+ \
+ memcpy(_p, &(_val), sizeof(g)); \
+})
+
void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);
void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
@@ -838,4 +858,13 @@ static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}
+/*
+ * Arch hook that is invoked via a constructor, i.e. before executing main(),
+ * to allow for arch-specific setup that is common to all tests, e.g. computing
+ * the default guest "mode".
+ */
+void kvm_selftest_arch_init(void);
+
+void kvm_arch_vm_post_create(struct kvm_vm *vm);
+
#endif /* SELFTEST_KVM_UTIL_BASE_H */
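The write_guest_global() macro is easiest to see in use. In the sketch below, guest_seed is a hypothetical per-VM global invented for illustration, following the semantics of the comment above:

	/* Hypothetical global; each VM holds its own copy of this data. */
	static uint32_t guest_seed;

	static void set_vm_seed(struct kvm_vm *vm, uint32_t seed)
	{
		/* Update only this VM's copy; the host's guest_seed is untouched. */
		write_guest_global(vm, guest_seed, seed);
	}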
diff --git a/tools/testing/selftests/kvm/include/memstress.h b/tools/testing/selftests/kvm/include/memstress.h
new file mode 100644
index 000000000000..bbd2a302df10
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/memstress.h
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * tools/testing/selftests/kvm/include/memstress.h
+ *
+ * Copyright (C) 2020, Google LLC.
+ */
+
+#ifndef SELFTEST_KVM_MEMSTRESS_H
+#define SELFTEST_KVM_MEMSTRESS_H
+
+#include <pthread.h>
+
+#include "kvm_util.h"
+
+/* Default guest test virtual memory offset */
+#define DEFAULT_GUEST_TEST_MEM 0xc0000000
+
+#define DEFAULT_PER_VCPU_MEM_SIZE (1 << 30) /* 1G */
+
+#define MEMSTRESS_MEM_SLOT_INDEX 1
+
+struct memstress_vcpu_args {
+ uint64_t gpa;
+ uint64_t gva;
+ uint64_t pages;
+
+ /* Only used by the host userspace part of the vCPU thread */
+ struct kvm_vcpu *vcpu;
+ int vcpu_idx;
+};
+
+struct memstress_args {
+ struct kvm_vm *vm;
+ /* The starting address and size of the guest test region. */
+ uint64_t gpa;
+ uint64_t size;
+ uint64_t guest_page_size;
+ uint32_t random_seed;
+ uint32_t write_percent;
+
+ /* Run vCPUs in L2 instead of L1, if the architecture supports it. */
+ bool nested;
+ /* Randomize which pages are accessed by the guest. */
+ bool random_access;
+ /* True if all vCPUs are pinned to pCPUs */
+ bool pin_vcpus;
+ /* The vCPU=>pCPU pinning map. Only valid if pin_vcpus is true. */
+ uint32_t vcpu_to_pcpu[KVM_MAX_VCPUS];
+
+ struct memstress_vcpu_args vcpu_args[KVM_MAX_VCPUS];
+};
+
+extern struct memstress_args memstress_args;
+
+struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
+ uint64_t vcpu_memory_bytes, int slots,
+ enum vm_mem_backing_src_type backing_src,
+ bool partition_vcpu_memory_access);
+void memstress_destroy_vm(struct kvm_vm *vm);
+
+void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent);
+void memstress_set_random_seed(struct kvm_vm *vm, uint32_t random_seed);
+void memstress_set_random_access(struct kvm_vm *vm, bool random_access);
+
+void memstress_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct memstress_vcpu_args *));
+void memstress_join_vcpu_threads(int vcpus);
+void memstress_guest_code(uint32_t vcpu_id);
+
+uint64_t memstress_nested_pages(int nr_vcpus);
+void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
+
+#endif /* SELFTEST_KVM_MEMSTRESS_H */
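Read together with the dirty_log_perf_test conversion above, the intended call pattern for this header is roughly the skeleton below; the worker body is elided and the parameters are illustrative, so treat it as a sketch rather than a complete test:

	static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
	{
		/* Pump vcpu_run() and handle ucalls for vcpu_args->vcpu here. */
	}

	static void run(enum vm_guest_mode mode, int nr_vcpus)
	{
		struct kvm_vm *vm;

		vm = memstress_create_vm(mode, nr_vcpus, DEFAULT_PER_VCPU_MEM_SIZE,
					 1 /* slots */, VM_MEM_SRC_ANONYMOUS,
					 true /* partition accesses */);

		memstress_set_random_seed(vm, 1);
		memstress_set_write_percent(vm, 100);
		memstress_set_random_access(vm, false);

		memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
		memstress_join_vcpu_threads(nr_vcpus);

		memstress_destroy_vm(vm);
	}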
diff --git a/tools/testing/selftests/kvm/include/perf_test_util.h b/tools/testing/selftests/kvm/include/perf_test_util.h
deleted file mode 100644
index eaa88df0555a..000000000000
--- a/tools/testing/selftests/kvm/include/perf_test_util.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * tools/testing/selftests/kvm/include/perf_test_util.h
- *
- * Copyright (C) 2020, Google LLC.
- */
-
-#ifndef SELFTEST_KVM_PERF_TEST_UTIL_H
-#define SELFTEST_KVM_PERF_TEST_UTIL_H
-
-#include <pthread.h>
-
-#include "kvm_util.h"
-
-/* Default guest test virtual memory offset */
-#define DEFAULT_GUEST_TEST_MEM 0xc0000000
-
-#define DEFAULT_PER_VCPU_MEM_SIZE (1 << 30) /* 1G */
-
-#define PERF_TEST_MEM_SLOT_INDEX 1
-
-struct perf_test_vcpu_args {
- uint64_t gpa;
- uint64_t gva;
- uint64_t pages;
-
- /* Only used by the host userspace part of the vCPU thread */
- struct kvm_vcpu *vcpu;
- int vcpu_idx;
-};
-
-struct perf_test_args {
- struct kvm_vm *vm;
- /* The starting address and size of the guest test region. */
- uint64_t gpa;
- uint64_t size;
- uint64_t guest_page_size;
- int wr_fract;
-
- /* Run vCPUs in L2 instead of L1, if the architecture supports it. */
- bool nested;
-
- struct perf_test_vcpu_args vcpu_args[KVM_MAX_VCPUS];
-};
-
-extern struct perf_test_args perf_test_args;
-
-struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
- uint64_t vcpu_memory_bytes, int slots,
- enum vm_mem_backing_src_type backing_src,
- bool partition_vcpu_memory_access);
-void perf_test_destroy_vm(struct kvm_vm *vm);
-
-void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract);
-
-void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *));
-void perf_test_join_vcpu_threads(int vcpus);
-void perf_test_guest_code(uint32_t vcpu_id);
-
-uint64_t perf_test_nested_pages(int nr_vcpus);
-void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
-
-#endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index befc754ce9b3..80d6416f3012 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -77,6 +77,13 @@ struct timespec timespec_sub(struct timespec ts1, struct timespec ts2);
struct timespec timespec_elapsed(struct timespec start);
struct timespec timespec_div(struct timespec ts, int divisor);
+struct guest_random_state {
+ uint32_t seed;
+};
+
+struct guest_random_state new_guest_random_state(uint32_t seed);
+uint32_t guest_random_u32(struct guest_random_state *state);
+
enum vm_mem_backing_src_type {
VM_MEM_SRC_ANONYMOUS,
VM_MEM_SRC_ANONYMOUS_THP,
@@ -152,4 +159,22 @@ static inline void *align_ptr_up(void *x, size_t size)
return (void *)align_up((unsigned long)x, size);
}
+int atoi_paranoid(const char *num_str);
+
+static inline uint32_t atoi_positive(const char *name, const char *num_str)
+{
+ int num = atoi_paranoid(num_str);
+
+ TEST_ASSERT(num > 0, "%s must be greater than 0, got '%s'", name, num_str);
+ return num;
+}
+
+static inline uint32_t atoi_non_negative(const char *name, const char *num_str)
+{
+ int num = atoi_paranoid(num_str);
+
+ TEST_ASSERT(num >= 0, "%s must be non-negative, got '%s'", name, num_str);
+ return num;
+}
+
#endif /* SELFTEST_KVM_TEST_UTIL_H */
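The bodies of new_guest_random_state() and guest_random_u32() live in lib/test_util.c and are not part of this hunk. Given the single 32-bit seed above, a Park-Miller style generator is the natural fit; a sketch under that assumption:

	struct guest_random_state new_guest_random_state(uint32_t seed)
	{
		struct guest_random_state state = { .seed = seed };

		return state;
	}

	uint32_t guest_random_u32(struct guest_random_state *state)
	{
		/* Park-Miller minimal standard: seed = seed * 48271 mod (2^31 - 1). */
		state->seed = (uint64_t)state->seed * 48271 % 0x7fffffffu;

		return state->seed;
	}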
diff --git a/tools/testing/selftests/kvm/include/ucall_common.h b/tools/testing/selftests/kvm/include/ucall_common.h
index ee79d180e07e..bdd373189a77 100644
--- a/tools/testing/selftests/kvm/include/ucall_common.h
+++ b/tools/testing/selftests/kvm/include/ucall_common.h
@@ -22,12 +22,18 @@ enum {
struct ucall {
uint64_t cmd;
uint64_t args[UCALL_MAX_ARGS];
+
+ /* Host virtual address of this struct. */
+ struct ucall *hva;
};
-void ucall_init(struct kvm_vm *vm, void *arg);
-void ucall_uninit(struct kvm_vm *vm);
+void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
+void ucall_arch_do_ucall(vm_vaddr_t uc);
+void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu);
+
void ucall(uint64_t cmd, int nargs, ...);
uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
+void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
#define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \
ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
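The common ucall() body lives in lib/ucall_common.c and is outside this diff. Conceptually, it fills a struct ucall whose new hva field tells the host where to read the struct back, then traps via the arch hook; ucall_alloc()/ucall_free() below are illustrative names for however the common code hands out ucall slots:

	void ucall(uint64_t cmd, int nargs, ...)
	{
		struct ucall *uc = ucall_alloc();	/* illustrative helper */
		va_list va;
		int i;

		WRITE_ONCE(uc->cmd, cmd);

		nargs = min(nargs, UCALL_MAX_ARGS);

		va_start(va, nargs);
		for (i = 0; i < nargs; ++i)
			WRITE_ONCE(uc->args[i], va_arg(va, uint64_t));
		va_end(va);

		/* The host locates the struct through uc->hva. */
		ucall_arch_do_ucall((vm_vaddr_t)uc);

		ucall_free(uc);			/* illustrative helper */
	}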
diff --git a/tools/testing/selftests/kvm/include/x86_64/evmcs.h b/tools/testing/selftests/kvm/include/x86_64/evmcs.h
index 58db74f68af2..901caf0e0939 100644
--- a/tools/testing/selftests/kvm/include/x86_64/evmcs.h
+++ b/tools/testing/selftests/kvm/include/x86_64/evmcs.h
@@ -10,6 +10,7 @@
#define SELFTEST_KVM_EVMCS_H
#include <stdint.h>
+#include "hyperv.h"
#include "vmx.h"
#define u16 uint16_t
@@ -20,15 +21,6 @@
extern bool enable_evmcs;
-struct hv_vp_assist_page {
- __u32 apic_assist;
- __u32 reserved;
- __u64 vtl_control[2];
- __u64 nested_enlightenments_control[2];
- __u32 enlighten_vmentry;
- __u64 current_nested_vmcs;
-};
-
struct hv_enlightened_vmcs {
u32 revision_id;
u32 abort;
@@ -41,6 +33,8 @@ struct hv_enlightened_vmcs {
u16 host_gs_selector;
u16 host_tr_selector;
+ u16 padding16_1;
+
u64 host_ia32_pat;
u64 host_ia32_efer;
@@ -159,7 +153,7 @@ struct hv_enlightened_vmcs {
u64 ept_pointer;
u16 virtual_processor_id;
- u16 padding16[3];
+ u16 padding16_2[3];
u64 padding64_2[5];
u64 guest_physical_address;
@@ -195,13 +189,13 @@ struct hv_enlightened_vmcs {
u64 guest_rip;
u32 hv_clean_fields;
- u32 hv_padding_32;
+ u32 padding32_1;
u32 hv_synthetic_controls;
struct {
u32 nested_flush_hypercall:1;
u32 msr_bitmap:1;
u32 reserved:30;
- } hv_enlightenments_control;
+ } __packed hv_enlightenments_control;
u32 hv_vp_id;
u32 padding32_2;
u64 hv_vm_id;
@@ -222,7 +216,7 @@ struct hv_enlightened_vmcs {
u64 host_ssp;
u64 host_ia32_int_ssp_table_addr;
u64 padding64_6;
-};
+} __packed;
#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE 0
#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP BIT(0)
@@ -243,29 +237,15 @@ struct hv_enlightened_vmcs {
#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL BIT(15)
#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL 0xFFFF
-#define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073
-#define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE 0x00000001
-#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT 12
-#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK \
- (~((1ull << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
+#define HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH 0x10000031
extern struct hv_enlightened_vmcs *current_evmcs;
-extern struct hv_vp_assist_page *current_vp_assist;
int vcpu_enable_evmcs(struct kvm_vcpu *vcpu);
-static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
+static inline void evmcs_enable(void)
{
- u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
- HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
-
- wrmsr(HV_X64_MSR_VP_ASSIST_PAGE, val);
-
- current_vp_assist = vp_assist;
-
enable_evmcs = true;
-
- return 0;
}
static inline int evmcs_vmptrld(uint64_t vmcs_pa, void *vmcs)
@@ -278,6 +258,16 @@ static inline int evmcs_vmptrld(uint64_t vmcs_pa, void *vmcs)
return 0;
}
+static inline bool load_evmcs(struct hyperv_test_pages *hv)
+{
+ if (evmcs_vmptrld(hv->enlightened_vmcs_gpa, hv->enlightened_vmcs))
+ return false;
+
+ current_evmcs->revision_id = EVMCS_VERSION;
+
+ return true;
+}
+
static inline int evmcs_vmptrst(uint64_t *value)
{
*value = current_vp_assist->current_nested_vmcs &
diff --git a/tools/testing/selftests/kvm/include/x86_64/hyperv.h b/tools/testing/selftests/kvm/include/x86_64/hyperv.h
index b66910702c0a..9218bb5f44bf 100644
--- a/tools/testing/selftests/kvm/include/x86_64/hyperv.h
+++ b/tools/testing/selftests/kvm/include/x86_64/hyperv.h
@@ -9,6 +9,8 @@
#ifndef SELFTEST_KVM_HYPERV_H
#define SELFTEST_KVM_HYPERV_H
+#include "processor.h"
+
#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000
#define HYPERV_CPUID_INTERFACE 0x40000001
#define HYPERV_CPUID_VERSION 0x40000002
@@ -184,5 +186,106 @@
/* hypercall options */
#define HV_HYPERCALL_FAST_BIT BIT(16)
+#define HV_HYPERCALL_VARHEAD_OFFSET 17
+#define HV_HYPERCALL_REP_COMP_OFFSET 32
+
+/*
+ * Issue a Hyper-V hypercall. Returns the raised exception vector, or 0 if
+ * none; 'hv_status' is set to the hypercall status (if no exception occurred).
+ */
+static inline uint8_t __hyperv_hypercall(u64 control, vm_vaddr_t input_address,
+ vm_vaddr_t output_address,
+ uint64_t *hv_status)
+{
+ uint64_t error_code;
+ uint8_t vector;
+
+ /* Note both the hypercall and the "asm safe" clobber r9-r11. */
+ asm volatile("mov %[output_address], %%r8\n\t"
+ KVM_ASM_SAFE("vmcall")
+ : "=a" (*hv_status),
+ "+c" (control), "+d" (input_address),
+ KVM_ASM_SAFE_OUTPUTS(vector, error_code)
+ : [output_address] "r"(output_address),
+ "a" (-EFAULT)
+ : "cc", "memory", "r8", KVM_ASM_SAFE_CLOBBERS);
+ return vector;
+}
+
+/* Issue a Hyper-V hypercall and assert that it succeeded. */
+static inline void hyperv_hypercall(u64 control, vm_vaddr_t input_address,
+ vm_vaddr_t output_address)
+{
+ uint64_t hv_status;
+ uint8_t vector;
+
+ vector = __hyperv_hypercall(control, input_address, output_address, &hv_status);
+
+ GUEST_ASSERT(!vector);
+ GUEST_ASSERT((hv_status & 0xffff) == 0);
+}
+
+/* Write 'Fast' hypercall input 'data' to the first 'n_sse_regs' SSE regs */
+static inline void hyperv_write_xmm_input(void *data, int n_sse_regs)
+{
+ int i;
+
+ for (i = 0; i < n_sse_regs; i++)
+ write_sse_reg(i, (sse128_t *)(data + sizeof(sse128_t) * i));
+}
+
+/* Proper HV_X64_MSR_GUEST_OS_ID value */
+#define HYPERV_LINUX_OS_ID ((u64)0x8100 << 48)
+
+#define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073
+#define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE 0x00000001
+#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT 12
+#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK \
+ (~((1ull << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
+
+struct hv_nested_enlightenments_control {
+ struct {
+ __u32 directhypercall:1;
+ __u32 reserved:31;
+ } features;
+ struct {
+ __u32 reserved;
+ } hypercallControls;
+} __packed;
+
+/* Define virtual processor assist page structure. */
+struct hv_vp_assist_page {
+ __u32 apic_assist;
+ __u32 reserved1;
+ __u64 vtl_control[3];
+ struct hv_nested_enlightenments_control nested_control;
+ __u8 enlighten_vmentry;
+ __u8 reserved2[7];
+ __u64 current_nested_vmcs;
+} __packed;
+
+extern struct hv_vp_assist_page *current_vp_assist;
+
+int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist);
+
+struct hyperv_test_pages {
+ /* VP assist page */
+ void *vp_assist_hva;
+ uint64_t vp_assist_gpa;
+ void *vp_assist;
+
+ /* Partition assist page */
+ void *partition_assist_hva;
+ uint64_t partition_assist_gpa;
+ void *partition_assist;
+
+ /* Enlightened VMCS */
+ void *enlightened_vmcs_hva;
+ uint64_t enlightened_vmcs_gpa;
+ void *enlightened_vmcs;
+};
+
+struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
+ vm_vaddr_t *p_hv_pages_gva);
#endif /* !SELFTEST_KVM_HYPERV_H */
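For orientation, a guest using the helpers above would set HV_X64_MSR_GUEST_OS_ID and then build the hypercall control word; the hypercall code 0x0003 and the rep count below are stand-ins, not values taken from this diff:

	static void guest_rep_hypercall(vm_vaddr_t input_gpa, vm_vaddr_t output_gpa,
					uint64_t rep_cnt)
	{
		/* Stand-in hypercall code in bits 15:0, rep count at bit 32. */
		u64 control = 0x0003 | (rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);

		wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
		hyperv_hypercall(control, input_gpa, output_gpa);
	}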
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index e8ca0d8a6a7e..5d310abe6c3f 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -63,16 +63,21 @@ struct kvm_x86_cpu_feature {
u8 reg;
u8 bit;
};
-#define KVM_X86_CPU_FEATURE(fn, idx, gpr, __bit) \
-({ \
- struct kvm_x86_cpu_feature feature = { \
- .function = fn, \
- .index = idx, \
- .reg = KVM_CPUID_##gpr, \
- .bit = __bit, \
- }; \
- \
- feature; \
+#define KVM_X86_CPU_FEATURE(fn, idx, gpr, __bit) \
+({ \
+ struct kvm_x86_cpu_feature feature = { \
+ .function = fn, \
+ .index = idx, \
+ .reg = KVM_CPUID_##gpr, \
+ .bit = __bit, \
+ }; \
+ \
+ static_assert((fn & 0xc0000000) == 0 || \
+ (fn & 0xc0000000) == 0x40000000 || \
+ (fn & 0xc0000000) == 0x80000000 || \
+ (fn & 0xc0000000) == 0xc0000000); \
+ static_assert(idx < BIT(sizeof(feature.index) * BITS_PER_BYTE)); \
+ feature; \
})
/*
@@ -89,6 +94,7 @@ struct kvm_x86_cpu_feature {
#define X86_FEATURE_XSAVE KVM_X86_CPU_FEATURE(0x1, 0, ECX, 26)
#define X86_FEATURE_OSXSAVE KVM_X86_CPU_FEATURE(0x1, 0, ECX, 27)
#define X86_FEATURE_RDRAND KVM_X86_CPU_FEATURE(0x1, 0, ECX, 30)
+#define X86_FEATURE_PAE KVM_X86_CPU_FEATURE(0x1, 0, EDX, 6)
#define X86_FEATURE_MCE KVM_X86_CPU_FEATURE(0x1, 0, EDX, 7)
#define X86_FEATURE_APIC KVM_X86_CPU_FEATURE(0x1, 0, EDX, 9)
#define X86_FEATURE_CLFLUSH KVM_X86_CPU_FEATURE(0x1, 0, EDX, 19)
@@ -162,6 +168,102 @@ struct kvm_x86_cpu_feature {
#define X86_FEATURE_KVM_HC_MAP_GPA_RANGE KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 16)
#define X86_FEATURE_KVM_MIGRATION_CONTROL KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 17)
+/*
+ * Same idea as X86_FEATURE_XXX, but X86_PROPERTY_XXX retrieves a multi-bit
+ * value/property as opposed to a single-bit feature. Again, pack the info
+ * into a 64-bit value to pass by value with no overhead.
+ */
+struct kvm_x86_cpu_property {
+ u32 function;
+ u8 index;
+ u8 reg;
+ u8 lo_bit;
+ u8 hi_bit;
+};
+#define KVM_X86_CPU_PROPERTY(fn, idx, gpr, low_bit, high_bit) \
+({ \
+ struct kvm_x86_cpu_property property = { \
+ .function = fn, \
+ .index = idx, \
+ .reg = KVM_CPUID_##gpr, \
+ .lo_bit = low_bit, \
+ .hi_bit = high_bit, \
+ }; \
+ \
+ static_assert(low_bit < high_bit); \
+ static_assert((fn & 0xc0000000) == 0 || \
+ (fn & 0xc0000000) == 0x40000000 || \
+ (fn & 0xc0000000) == 0x80000000 || \
+ (fn & 0xc0000000) == 0xc0000000); \
+ static_assert(idx < BIT(sizeof(property.index) * BITS_PER_BYTE)); \
+ property; \
+})
+
+#define X86_PROPERTY_MAX_BASIC_LEAF KVM_X86_CPU_PROPERTY(0, 0, EAX, 0, 31)
+#define X86_PROPERTY_PMU_VERSION KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 0, 7)
+#define X86_PROPERTY_PMU_NR_GP_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
+#define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
+
+#define X86_PROPERTY_XSTATE_MAX_SIZE_XCR0 KVM_X86_CPU_PROPERTY(0xd, 0, EBX, 0, 31)
+#define X86_PROPERTY_XSTATE_MAX_SIZE KVM_X86_CPU_PROPERTY(0xd, 0, ECX, 0, 31)
+#define X86_PROPERTY_XSTATE_TILE_SIZE KVM_X86_CPU_PROPERTY(0xd, 18, EAX, 0, 31)
+#define X86_PROPERTY_XSTATE_TILE_OFFSET KVM_X86_CPU_PROPERTY(0xd, 18, EBX, 0, 31)
+#define X86_PROPERTY_AMX_TOTAL_TILE_BYTES KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 0, 15)
+#define X86_PROPERTY_AMX_BYTES_PER_TILE KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 16, 31)
+#define X86_PROPERTY_AMX_BYTES_PER_ROW KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 0, 15)
+#define X86_PROPERTY_AMX_NR_TILE_REGS KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 16, 31)
+#define X86_PROPERTY_AMX_MAX_ROWS KVM_X86_CPU_PROPERTY(0x1d, 1, ECX, 0, 15)
+
+#define X86_PROPERTY_MAX_KVM_LEAF KVM_X86_CPU_PROPERTY(0x40000000, 0, EAX, 0, 31)
+
+#define X86_PROPERTY_MAX_EXT_LEAF KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31)
+#define X86_PROPERTY_MAX_PHY_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
+#define X86_PROPERTY_MAX_VIRT_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15)
+#define X86_PROPERTY_PHYS_ADDR_REDUCTION KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)
+
+#define X86_PROPERTY_MAX_CENTAUR_LEAF KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)
+
+/*
+ * Intel's architectural PMU events are bizarre. They have a "feature" bit
+ * that indicates the feature is _not_ supported, and a property that states
+ * the length of the bit mask of unsupported features. A feature is supported
+ * if the size of the bit mask is larger than the "unavailable" bit, and said
+ * bit is not set.
+ *
+ * Wrap the "unavailable" feature to simplify checking whether or not a given
+ * architectural event is supported.
+ */
+struct kvm_x86_pmu_feature {
+ struct kvm_x86_cpu_feature anti_feature;
+};
+#define KVM_X86_PMU_FEATURE(name, __bit) \
+({ \
+ struct kvm_x86_pmu_feature feature = { \
+ .anti_feature = KVM_X86_CPU_FEATURE(0xa, 0, EBX, __bit), \
+ }; \
+ \
+ feature; \
+})
+
+#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED KVM_X86_PMU_FEATURE(BRANCH_INSNS_RETIRED, 5)
+
+static inline unsigned int x86_family(unsigned int eax)
+{
+ unsigned int x86;
+
+ x86 = (eax >> 8) & 0xf;
+
+ if (x86 == 0xf)
+ x86 += (eax >> 20) & 0xff;
+
+ return x86;
+}
+
+static inline unsigned int x86_model(unsigned int eax)
+{
+ return ((eax >> 12) & 0xf0) | ((eax >> 4) & 0x0f);
+}
+
/* Page table bitfield declarations */
#define PTE_PRESENT_MASK BIT_ULL(0)
#define PTE_WRITABLE_MASK BIT_ULL(1)
@@ -172,12 +274,18 @@ struct kvm_x86_cpu_feature {
#define PTE_GLOBAL_MASK BIT_ULL(8)
#define PTE_NX_MASK BIT_ULL(63)
+#define PHYSICAL_PAGE_MASK GENMASK_ULL(51, 12)
+
#define PAGE_SHIFT 12
#define PAGE_SIZE (1ULL << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_MASK (~(PAGE_SIZE-1) & PHYSICAL_PAGE_MASK)
-#define PHYSICAL_PAGE_MASK GENMASK_ULL(51, 12)
-#define PTE_GET_PFN(pte) (((pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
+#define HUGEPAGE_SHIFT(x) (PAGE_SHIFT + (((x) - 1) * 9))
+#define HUGEPAGE_SIZE(x) (1UL << HUGEPAGE_SHIFT(x))
+#define HUGEPAGE_MASK(x) (~(HUGEPAGE_SIZE(x) - 1) & PHYSICAL_PAGE_MASK)
+
+#define PTE_GET_PA(pte) ((pte) & PHYSICAL_PAGE_MASK)
+#define PTE_GET_PFN(pte) (PTE_GET_PA(pte) >> PAGE_SHIFT)
/* General Registers in 64-Bit Mode */
struct gpr64_regs {
@@ -425,82 +533,143 @@ static inline void cpuid(uint32_t function,
return __cpuid(function, 0, eax, ebx, ecx, edx);
}
-static inline bool this_cpu_has(struct kvm_x86_cpu_feature feature)
+static inline uint32_t this_cpu_fms(void)
+{
+ uint32_t eax, ebx, ecx, edx;
+
+ cpuid(1, &eax, &ebx, &ecx, &edx);
+ return eax;
+}
+
+static inline uint32_t this_cpu_family(void)
+{
+ return x86_family(this_cpu_fms());
+}
+
+static inline uint32_t this_cpu_model(void)
+{
+ return x86_model(this_cpu_fms());
+}
+
+static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index,
+ uint8_t reg, uint8_t lo, uint8_t hi)
{
uint32_t gprs[4];
- __cpuid(feature.function, feature.index,
+ __cpuid(function, index,
&gprs[KVM_CPUID_EAX], &gprs[KVM_CPUID_EBX],
&gprs[KVM_CPUID_ECX], &gprs[KVM_CPUID_EDX]);
- return gprs[feature.reg] & BIT(feature.bit);
+ return (gprs[reg] & GENMASK(hi, lo)) >> lo;
+}
+
+static inline bool this_cpu_has(struct kvm_x86_cpu_feature feature)
+{
+ return __this_cpu_has(feature.function, feature.index,
+ feature.reg, feature.bit, feature.bit);
}
-#define SET_XMM(__var, __xmm) \
- asm volatile("movq %0, %%"#__xmm : : "r"(__var) : #__xmm)
+static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property)
+{
+ return __this_cpu_has(property.function, property.index,
+ property.reg, property.lo_bit, property.hi_bit);
+}
-static inline void set_xmm(int n, unsigned long val)
+static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
{
- switch (n) {
+ uint32_t max_leaf;
+
+ switch (property.function & 0xc0000000) {
case 0:
- SET_XMM(val, xmm0);
+ max_leaf = this_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
+ break;
+ case 0x40000000:
+ max_leaf = this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
+ break;
+ case 0x80000000:
+ max_leaf = this_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
+ break;
+ case 0xc0000000:
+ max_leaf = this_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
+ }
+ return max_leaf >= property.function;
+}
+
+static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
+{
+ uint32_t nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
+
+ return nr_bits > feature.anti_feature.bit &&
+ !this_cpu_has(feature.anti_feature);
+}
+
+typedef u32 __attribute__((vector_size(16))) sse128_t;
+#define __sse128_u union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
+#define sse128_lo(x) ({ __sse128_u t; t.vec = x; t.as_u64[0]; })
+#define sse128_hi(x) ({ __sse128_u t; t.vec = x; t.as_u64[1]; })
+
+static inline void read_sse_reg(int reg, sse128_t *data)
+{
+ switch (reg) {
+ case 0:
+ asm("movdqa %%xmm0, %0" : "=m"(*data));
break;
case 1:
- SET_XMM(val, xmm1);
+ asm("movdqa %%xmm1, %0" : "=m"(*data));
break;
case 2:
- SET_XMM(val, xmm2);
+ asm("movdqa %%xmm2, %0" : "=m"(*data));
break;
case 3:
- SET_XMM(val, xmm3);
+ asm("movdqa %%xmm3, %0" : "=m"(*data));
break;
case 4:
- SET_XMM(val, xmm4);
+ asm("movdqa %%xmm4, %0" : "=m"(*data));
break;
case 5:
- SET_XMM(val, xmm5);
+ asm("movdqa %%xmm5, %0" : "=m"(*data));
break;
case 6:
- SET_XMM(val, xmm6);
+ asm("movdqa %%xmm6, %0" : "=m"(*data));
break;
case 7:
- SET_XMM(val, xmm7);
+ asm("movdqa %%xmm7, %0" : "=m"(*data));
break;
+ default:
+ BUG();
}
}
-#define GET_XMM(__xmm) \
-({ \
- unsigned long __val; \
- asm volatile("movq %%"#__xmm", %0" : "=r"(__val)); \
- __val; \
-})
-
-static inline unsigned long get_xmm(int n)
+static inline void write_sse_reg(int reg, const sse128_t *data)
{
- assert(n >= 0 && n <= 7);
-
- switch (n) {
+ switch (reg) {
case 0:
- return GET_XMM(xmm0);
+ asm("movdqa %0, %%xmm0" : : "m"(*data));
+ break;
case 1:
- return GET_XMM(xmm1);
+ asm("movdqa %0, %%xmm1" : : "m"(*data));
+ break;
case 2:
- return GET_XMM(xmm2);
+ asm("movdqa %0, %%xmm2" : : "m"(*data));
+ break;
case 3:
- return GET_XMM(xmm3);
+ asm("movdqa %0, %%xmm3" : : "m"(*data));
+ break;
case 4:
- return GET_XMM(xmm4);
+ asm("movdqa %0, %%xmm4" : : "m"(*data));
+ break;
case 5:
- return GET_XMM(xmm5);
+ asm("movdqa %0, %%xmm5" : : "m"(*data));
+ break;
case 6:
- return GET_XMM(xmm6);
+ asm("movdqa %0, %%xmm6" : : "m"(*data));
+ break;
case 7:
- return GET_XMM(xmm7);
+ asm("movdqa %0, %%xmm7" : : "m"(*data));
+ break;
+ default:
+ BUG();
}
-
- /* never reached */
- return 0;
}
static inline void cpu_relax(void)
@@ -508,11 +677,6 @@ static inline void cpu_relax(void)
asm volatile("rep; nop" ::: "memory");
}
-#define vmmcall() \
- __asm__ __volatile__( \
- "vmmcall\n" \
- )
-
#define ud2() \
__asm__ __volatile__( \
"ud2\n" \
@@ -526,23 +690,6 @@ static inline void cpu_relax(void)
bool is_intel_cpu(void);
bool is_amd_cpu(void);
-static inline unsigned int x86_family(unsigned int eax)
-{
- unsigned int x86;
-
- x86 = (eax >> 8) & 0xf;
-
- if (x86 == 0xf)
- x86 += (eax >> 20) & 0xff;
-
- return x86;
-}
-
-static inline unsigned int x86_model(unsigned int eax)
-{
- return ((eax >> 12) & 0xf0) | ((eax >> 4) & 0x0f);
-}
-
struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu);
void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state);
void kvm_x86_state_cleanup(struct kvm_x86_state *state);
@@ -604,10 +751,27 @@ static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
vcpu_ioctl(vcpu, KVM_SET_XCRS, xcrs);
}
+const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
+ uint32_t function, uint32_t index);
const struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu);
+static inline uint32_t kvm_cpu_fms(void)
+{
+ return get_cpuid_entry(kvm_get_supported_cpuid(), 0x1, 0)->eax;
+}
+
+static inline uint32_t kvm_cpu_family(void)
+{
+ return x86_family(kvm_cpu_fms());
+}
+
+static inline uint32_t kvm_cpu_model(void)
+{
+ return x86_model(kvm_cpu_fms());
+}
+
bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
struct kvm_x86_cpu_feature feature);
@@ -616,6 +780,42 @@ static inline bool kvm_cpu_has(struct kvm_x86_cpu_feature feature)
return kvm_cpuid_has(kvm_get_supported_cpuid(), feature);
}
+uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
+ struct kvm_x86_cpu_property property);
+
+static inline uint32_t kvm_cpu_property(struct kvm_x86_cpu_property property)
+{
+ return kvm_cpuid_property(kvm_get_supported_cpuid(), property);
+}
+
+static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
+{
+ uint32_t max_leaf;
+
+ switch (property.function & 0xc0000000) {
+ case 0:
+ max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
+ break;
+ case 0x40000000:
+ max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
+ break;
+ case 0x80000000:
+ max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
+ break;
+ case 0xc0000000:
+ max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
+ }
+ return max_leaf >= property.function;
+}
+
+static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
+{
+ uint32_t nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
+
+ return nr_bits > feature.anti_feature.bit &&
+ !kvm_cpu_has(feature.anti_feature);
+}
+
static inline size_t kvm_cpuid2_size(int nr_entries)
{
return sizeof(struct kvm_cpuid2) +
@@ -639,8 +839,6 @@ static inline struct kvm_cpuid2 *allocate_kvm_cpuid2(int nr_entries)
return cpuid;
}
-const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
- uint32_t function, uint32_t index);
void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid);
void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu);
@@ -701,17 +899,6 @@ static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu,
vcpu_set_or_clear_cpuid_feature(vcpu, feature, false);
}
-static inline const struct kvm_cpuid_entry2 *__kvm_get_supported_cpuid_entry(uint32_t function,
- uint32_t index)
-{
- return get_cpuid_entry(kvm_get_supported_cpuid(), function, index);
-}
-
-static inline const struct kvm_cpuid_entry2 *kvm_get_supported_cpuid_entry(uint32_t function)
-{
- return __kvm_get_supported_cpuid_entry(function, 0);
-}
-
uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index);
int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value);
@@ -723,15 +910,6 @@ static inline void vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index,
TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_SET_MSRS, r));
}
-static inline uint32_t kvm_get_cpuid_max_basic(void)
-{
- return kvm_get_supported_cpuid_entry(0)->eax;
-}
-
-static inline uint32_t kvm_get_cpuid_max_extended(void)
-{
- return kvm_get_supported_cpuid_entry(0x80000000)->eax;
-}
void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
bool vm_is_unrestricted_guest(struct kvm_vm *vm);
@@ -748,6 +926,19 @@ struct ex_regs {
uint64_t rflags;
};
+struct idt_entry {
+ uint16_t offset0;
+ uint16_t selector;
+ uint16_t ist : 3;
+ uint16_t : 5;
+ uint16_t type : 4;
+ uint16_t : 1;
+ uint16_t dpl : 2;
+ uint16_t p : 1;
+ uint16_t offset1;
+ uint32_t offset2;
+ uint32_t reserved;
+};
+
void vm_init_descriptor_tables(struct kvm_vm *vm);
void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
void vm_install_exception_handler(struct kvm_vm *vm, int vector,
@@ -764,7 +955,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
* for recursive faults when accessing memory in the handler. The downside to
* using registers is that it restricts what registers can be used by the actual
* instruction. But, selftests are 64-bit only, making register pressure a
- * minor concern. Use r9-r11 as they are volatile, i.e. don't need* to be saved
+ * minor concern. Use r9-r11 as they are volatile, i.e. don't need to be saved
* by the callee, and except for r11 are not implicit parameters to any
* instructions. Ideally, fixup would use r8-r10 and thus avoid implicit
* parameters entirely, but Hyper-V's hypercall ABI uses r8 and testing Hyper-V
@@ -780,39 +971,52 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
*
* REGISTER OUTPUTS:
* r9 = exception vector (non-zero)
+ * r10 = error code
*/
#define KVM_ASM_SAFE(insn) \
"mov $" __stringify(KVM_EXCEPTION_MAGIC) ", %%r9\n\t" \
"lea 1f(%%rip), %%r10\n\t" \
"lea 2f(%%rip), %%r11\n\t" \
"1: " insn "\n\t" \
- "movb $0, %[vector]\n\t" \
- "jmp 3f\n\t" \
+ "xor %%r9, %%r9\n\t" \
"2:\n\t" \
"mov %%r9b, %[vector]\n\t" \
- "3:\n\t"
+ "mov %%r10, %[error_code]\n\t"
-#define KVM_ASM_SAFE_OUTPUTS(v) [vector] "=qm"(v)
+#define KVM_ASM_SAFE_OUTPUTS(v, ec) [vector] "=qm"(v), [error_code] "=rm"(ec)
#define KVM_ASM_SAFE_CLOBBERS "r9", "r10", "r11"
-#define kvm_asm_safe(insn, inputs...) \
-({ \
- uint8_t vector; \
- \
- asm volatile(KVM_ASM_SAFE(insn) \
- : KVM_ASM_SAFE_OUTPUTS(vector) \
- : inputs \
- : KVM_ASM_SAFE_CLOBBERS); \
- vector; \
+#define kvm_asm_safe(insn, inputs...) \
+({ \
+ uint64_t ign_error_code; \
+ uint8_t vector; \
+ \
+ asm volatile(KVM_ASM_SAFE(insn) \
+ : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \
+ : inputs \
+ : KVM_ASM_SAFE_CLOBBERS); \
+ vector; \
+})
+
+#define kvm_asm_safe_ec(insn, error_code, inputs...) \
+({ \
+ uint8_t vector; \
+ \
+ asm volatile(KVM_ASM_SAFE(insn) \
+ : KVM_ASM_SAFE_OUTPUTS(vector, error_code) \
+ : inputs \
+ : KVM_ASM_SAFE_CLOBBERS); \
+ vector; \
})
static inline uint8_t rdmsr_safe(uint32_t msr, uint64_t *val)
{
+ uint64_t error_code;
uint8_t vector;
uint32_t a, d;
asm volatile(KVM_ASM_SAFE("rdmsr")
- : "=a"(a), "=d"(d), KVM_ASM_SAFE_OUTPUTS(vector)
+ : "=a"(a), "=d"(d), KVM_ASM_SAFE_OUTPUTS(vector, error_code)
: "c"(msr)
: KVM_ASM_SAFE_CLOBBERS);
@@ -827,10 +1031,9 @@ static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
bool kvm_is_tdp_enabled(void);
-uint64_t vm_get_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
- uint64_t vaddr);
-void vm_set_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
- uint64_t vaddr, uint64_t pte);
+uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
+ int *level);
+uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr);
uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
uint64_t a3);
@@ -882,4 +1085,27 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
#define XSTATE_XTILE_DATA_MASK (1ULL << XSTATE_XTILE_DATA_BIT)
#define XFEATURE_XTILE_MASK (XSTATE_XTILE_CFG_MASK | \
XSTATE_XTILE_DATA_MASK)
+
+#define PFERR_PRESENT_BIT 0
+#define PFERR_WRITE_BIT 1
+#define PFERR_USER_BIT 2
+#define PFERR_RSVD_BIT 3
+#define PFERR_FETCH_BIT 4
+#define PFERR_PK_BIT 5
+#define PFERR_SGX_BIT 15
+#define PFERR_GUEST_FINAL_BIT 32
+#define PFERR_GUEST_PAGE_BIT 33
+#define PFERR_IMPLICIT_ACCESS_BIT 48
+
+#define PFERR_PRESENT_MASK BIT(PFERR_PRESENT_BIT)
+#define PFERR_WRITE_MASK BIT(PFERR_WRITE_BIT)
+#define PFERR_USER_MASK BIT(PFERR_USER_BIT)
+#define PFERR_RSVD_MASK BIT(PFERR_RSVD_BIT)
+#define PFERR_FETCH_MASK BIT(PFERR_FETCH_BIT)
+#define PFERR_PK_MASK BIT(PFERR_PK_BIT)
+#define PFERR_SGX_MASK BIT(PFERR_SGX_BIT)
+#define PFERR_GUEST_FINAL_MASK BIT_ULL(PFERR_GUEST_FINAL_BIT)
+#define PFERR_GUEST_PAGE_MASK BIT_ULL(PFERR_GUEST_PAGE_BIT)
+#define PFERR_IMPLICIT_ACCESS BIT_ULL(PFERR_IMPLICIT_ACCESS_BIT)
+
#endif /* SELFTEST_KVM_PROCESSOR_H */
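To make the property plumbing concrete: callers are expected to gate this_cpu_property() on this_cpu_has_p() whenever the leaf may not exist, and this_pmu_has() hides the inverted PMU "anti-feature" check. A host-side sketch (the printf wrapping is illustrative):

	static void show_cpu_properties(void)
	{
		/* Guard the read: the extended leaf may not exist on this CPU. */
		if (this_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR))
			printf("MAXPHYADDR: %u bits\n",
			       this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR));

		/* Supported iff the event's bit is in range and NOT set. */
		if (this_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED))
			printf("Arch event: branch instructions retired\n");
	}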
diff --git a/tools/testing/selftests/kvm/include/x86_64/svm.h b/tools/testing/selftests/kvm/include/x86_64/svm.h
index c8343ff84f7f..4803e1056055 100644
--- a/tools/testing/selftests/kvm/include/x86_64/svm.h
+++ b/tools/testing/selftests/kvm/include/x86_64/svm.h
@@ -58,6 +58,27 @@ enum {
INTERCEPT_RDPRU,
};
+struct hv_vmcb_enlightenments {
+ struct __packed hv_enlightenments_control {
+ u32 nested_flush_hypercall:1;
+ u32 msr_bitmap:1;
+ u32 enlightened_npt_tlb: 1;
+ u32 reserved:29;
+ } __packed hv_enlightenments_control;
+ u32 hv_vp_id;
+ u64 hv_vm_id;
+ u64 partition_assist_page;
+ u64 reserved;
+} __packed;
+
+/*
+ * Hyper-V uses the software reserved clean bit in VMCB
+ */
+#define HV_VMCB_NESTED_ENLIGHTENMENTS (1U << 31)
+
+/* Synthetic VM-Exit */
+#define HV_SVM_EXITCODE_ENL 0xf0000000
+#define HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH (1)
struct __attribute__ ((__packed__)) vmcb_control_area {
u32 intercept_cr;
@@ -106,7 +127,10 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
* Offset 0x3e0, 32 bytes reserved
* for use by hypervisor/software.
*/
- u8 reserved_sw[32];
+ union {
+ struct hv_vmcb_enlightenments hv_enlightenments;
+ u8 reserved_sw[32];
+ };
};
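A nested test opting in to the enlightenments would now go through the union instead of poking reserved_sw bytes. Field usage below is inferred from the layout above and from vmcb_control_area's clean-bits word; a sketch, not code from this series:

	static void vmcb_enable_hv_enlightenments(struct vmcb_control_area *ctrl,
						  u32 vp_id)
	{
		struct hv_vmcb_enlightenments *hve = &ctrl->hv_enlightenments;

		hve->hv_enlightenments_control.msr_bitmap = 1;
		hve->hv_vp_id = vp_id;

		/* Mark the software-reserved clean bit as in use. */
		ctrl->clean |= HV_VMCB_NESTED_ENLIGHTENMENTS;
	}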
diff --git a/tools/testing/selftests/kvm/include/x86_64/svm_util.h b/tools/testing/selftests/kvm/include/x86_64/svm_util.h
index 7aee6244ab6a..044f0f872ba9 100644
--- a/tools/testing/selftests/kvm/include/x86_64/svm_util.h
+++ b/tools/testing/selftests/kvm/include/x86_64/svm_util.h
@@ -32,6 +32,20 @@ struct svm_test_data {
uint64_t msr_gpa;
};
+static inline void vmmcall(void)
+{
+ /*
+ * Stuff RAX and RCX with "safe" values to make sure L0 doesn't handle
+ * it as a valid hypercall (e.g. Hyper-V L2 TLB flush) as the intended
+ * use of this function is to exit to L1 from L2. Clobber all other
+ * GPRs as L1 doesn't correctly preserve them during vmexits.
+ */
+ __asm__ __volatile__("push %%rbp; vmmcall; pop %%rbp"
+ : : "a"(0xdeadbeef), "c"(0xbeefdead)
+ : "rbx", "rdx", "rsi", "rdi", "r8", "r9",
+ "r10", "r11", "r12", "r13", "r14", "r15");
+}
+
#define stgi() \
__asm__ __volatile__( \
"stgi\n" \
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index 71b290b6469d..5f0c0a29c556 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -437,11 +437,16 @@ static inline int vmresume(void)
static inline void vmcall(void)
{
- /* Currently, L1 destroys our GPRs during vmexits. */
- __asm__ __volatile__("push %%rbp; vmcall; pop %%rbp" : : :
- "rax", "rbx", "rcx", "rdx",
- "rsi", "rdi", "r8", "r9", "r10", "r11", "r12",
- "r13", "r14", "r15");
+ /*
+ * Stuff RAX and RCX with "safe" values to make sure L0 doesn't handle
+ * it as a valid hypercall (e.g. Hyper-V L2 TLB flush) as the intended
+ * use of this function is to exit to L1 from L2. Clobber all other
+ * GPRs as L1 doesn't correctly preserve them during vmexits.
+ */
+ __asm__ __volatile__("push %%rbp; vmcall; pop %%rbp"
+ : : "a"(0xdeadbeef), "c"(0xbeefdead)
+ : "rbx", "rdx", "rsi", "rdi", "r8", "r9",
+ "r10", "r11", "r12", "r13", "r14", "r15");
}
static inline int vmread(uint64_t encoding, uint64_t *value)
@@ -517,14 +522,6 @@ struct vmx_pages {
uint64_t vmwrite_gpa;
void *vmwrite;
- void *vp_assist_hva;
- uint64_t vp_assist_gpa;
- void *vp_assist;
-
- void *enlightened_vmcs_hva;
- uint64_t enlightened_vmcs_gpa;
- void *enlightened_vmcs;
-
void *eptp_hva;
uint64_t eptp_gpa;
void *eptp;
@@ -572,7 +569,7 @@ void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t memslot);
void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t addr, uint64_t size);
-bool kvm_vm_has_ept(struct kvm_vm *vm);
+bool kvm_cpu_has_ept(void);
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t eptp_memslot);
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
index f42c6ac6d71d..b3b00be1ef82 100644
--- a/tools/testing/selftests/kvm/kvm_page_table_test.c
+++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -289,7 +289,6 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
/* Export shared structure test_args to guest */
- ucall_init(vm, NULL);
sync_global_to_guest(vm, test_args);
ret = sem_init(&test_stage_updated, 0, 0);
@@ -417,7 +416,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
TEST_ASSERT(ret == 0, "Error in sem_destroy");
free(vcpu_threads);
- ucall_uninit(vm);
kvm_vm_free(vm);
}
@@ -461,8 +459,8 @@ int main(int argc, char *argv[])
p.test_mem_size = parse_size(optarg);
break;
case 'v':
- nr_vcpus = atoi(optarg);
- TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
+ nr_vcpus = atoi_positive("Number of vCPUs", optarg);
+ TEST_ASSERT(nr_vcpus <= max_vcpus,
"Invalid number of vcpus, must be between 1 and %d", max_vcpus);
break;
case 's':
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index 6f5551368944..0de4aabc0c76 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -495,15 +495,6 @@ void aarch64_get_supported_page_sizes(uint32_t ipa,
close(kvm_fd);
}
-/*
- * arm64 doesn't have a true default mode, so start by computing the
- * available IPA space and page sizes early.
- */
-void __attribute__((constructor)) init_guest_modes(void)
-{
- guest_modes_append_default();
-}
-
void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
uint64_t arg6, struct arm_smccc_res *res)
@@ -528,3 +519,12 @@ void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
[arg4] "r"(arg4), [arg5] "r"(arg5), [arg6] "r"(arg6)
: "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7");
}
+
+void kvm_selftest_arch_init(void)
+{
+ /*
+ * arm64 doesn't have a true default mode, so start by computing the
+ * available IPA space and page sizes early.
+ */
+ guest_modes_append_default();
+}
diff --git a/tools/testing/selftests/kvm/lib/aarch64/ucall.c b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
index ed237b744690..562c16dfbb00 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
@@ -6,108 +6,36 @@
*/
#include "kvm_util.h"
+/*
+ * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each
+ * VM) and must not be accessed from host code.
+ */
static vm_vaddr_t *ucall_exit_mmio_addr;
-static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa)
-{
- if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1))
- return false;
-
- virt_pg_map(vm, gpa, gpa);
-
- ucall_exit_mmio_addr = (vm_vaddr_t *)gpa;
- sync_global_to_guest(vm, ucall_exit_mmio_addr);
-
- return true;
-}
-
-void ucall_init(struct kvm_vm *vm, void *arg)
+void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
- vm_paddr_t gpa, start, end, step, offset;
- unsigned int bits;
- bool ret;
+ virt_pg_map(vm, mmio_gpa, mmio_gpa);
- if (arg) {
- gpa = (vm_paddr_t)arg;
- ret = ucall_mmio_init(vm, gpa);
- TEST_ASSERT(ret, "Can't set ucall mmio address to %lx", gpa);
- return;
- }
+ vm->ucall_mmio_addr = mmio_gpa;
- /*
- * Find an address within the allowed physical and virtual address
- * spaces, that does _not_ have a KVM memory region associated with
- * it. Identity mapping an address like this allows the guest to
- * access it, but as KVM doesn't know what to do with it, it
- * will assume it's something userspace handles and exit with
- * KVM_EXIT_MMIO. Well, at least that's how it works for AArch64.
- * Here we start with a guess that the addresses around 5/8th
- * of the allowed space are unmapped and then work both down and
- * up from there in 1/16th allowed space sized steps.
- *
- * Note, we need to use VA-bits - 1 when calculating the allowed
- * virtual address space for an identity mapping because the upper
- * half of the virtual address space is the two's complement of the
- * lower and won't match physical addresses.
- */
- bits = vm->va_bits - 1;
- bits = min(vm->pa_bits, bits);
- end = 1ul << bits;
- start = end * 5 / 8;
- step = end / 16;
- for (offset = 0; offset < end - start; offset += step) {
- if (ucall_mmio_init(vm, start - offset))
- return;
- if (ucall_mmio_init(vm, start + offset))
- return;
- }
- TEST_FAIL("Can't find a ucall mmio address");
+ write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gpa);
}
-void ucall_uninit(struct kvm_vm *vm)
+void ucall_arch_do_ucall(vm_vaddr_t uc)
{
- ucall_exit_mmio_addr = 0;
- sync_global_to_guest(vm, ucall_exit_mmio_addr);
+ WRITE_ONCE(*ucall_exit_mmio_addr, uc);
}
-void ucall(uint64_t cmd, int nargs, ...)
-{
- struct ucall uc = {};
- va_list va;
- int i;
-
- WRITE_ONCE(uc.cmd, cmd);
- nargs = min(nargs, UCALL_MAX_ARGS);
-
- va_start(va, nargs);
- for (i = 0; i < nargs; ++i)
- WRITE_ONCE(uc.args[i], va_arg(va, uint64_t));
- va_end(va);
-
- WRITE_ONCE(*ucall_exit_mmio_addr, (vm_vaddr_t)&uc);
-}
-
-uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
+void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- struct ucall ucall = {};
-
- if (uc)
- memset(uc, 0, sizeof(*uc));
if (run->exit_reason == KVM_EXIT_MMIO &&
- run->mmio.phys_addr == (uint64_t)ucall_exit_mmio_addr) {
- vm_vaddr_t gva;
-
- TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8,
+ run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) {
+ TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t),
"Unexpected ucall exit mmio address access");
- memcpy(&gva, run->mmio.data, sizeof(gva));
- memcpy(&ucall, addr_gva2hva(vcpu->vm, gva), sizeof(ucall));
-
- vcpu_run_complete_io(vcpu);
- if (uc)
- memcpy(uc, &ucall, sizeof(ucall));
+ return (void *)(*((uint64_t *)run->mmio.data));
}
- return ucall.cmd;
+ return NULL;
}
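Host-side consumers are unaffected by the arch split: get_ucall() (common code, outside this hunk) still returns the command and fills in the struct, so the usual run loop keeps working. The typical pattern, for reference:

	static void run_vcpu_to_done(struct kvm_vcpu *vcpu)
	{
		struct ucall uc;

		for (;;) {
			vcpu_run(vcpu);

			switch (get_ucall(vcpu, &uc)) {
			case UCALL_SYNC:
				continue;	/* guest checkpoint, keep going */
			case UCALL_ABORT:
				TEST_FAIL("Guest abort: %s", (const char *)uc.args[0]);
			case UCALL_DONE:
				return;
			default:
				TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
			}
		}
	}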
diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c
index 9f54c098d9d0..d71a9a5974de 100644
--- a/tools/testing/selftests/kvm/lib/elf.c
+++ b/tools/testing/selftests/kvm/lib/elf.c
@@ -138,7 +138,7 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
offset = hdr.e_phoff + (n1 * hdr.e_phentsize);
offset_rv = lseek(fd, offset, SEEK_SET);
TEST_ASSERT(offset_rv == offset,
- "Failed to seek to begining of program header %u,\n"
+ "Failed to seek to beginning of program header %u,\n"
" filename: %s\n"
" rv: %jd errno: %i",
n1, filename, (intmax_t) offset_rv, errno);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index f1cb1627161f..1d26a2160178 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -11,6 +11,7 @@
#include "processor.h"
#include <assert.h>
+#include <sched.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -334,15 +335,24 @@ struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
{
uint64_t nr_pages = vm_nr_pages_required(mode, nr_runnable_vcpus,
nr_extra_pages);
+ struct userspace_mem_region *slot0;
struct kvm_vm *vm;
vm = ____vm_create(mode, nr_pages);
kvm_vm_elf_load(vm, program_invocation_name);
-#ifdef __x86_64__
- vm_create_irqchip(vm);
-#endif
+ /*
+ * TODO: Add proper defines to protect the library's memslots, and then
+ * carve out memslot1 for the ucall MMIO address. KVM treats writes to
+ * read-only memslots as MMIO, and creating a read-only memslot for the
+ * MMIO region would prevent silently clobbering the MMIO region.
+ */
+ slot0 = memslot2region(vm, 0);
+ ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);
+
+ kvm_arch_vm_post_create(vm);
+
return vm;
}
@@ -443,6 +453,59 @@ struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm)
return vm_vcpu_recreate(vm, 0);
}
+void kvm_pin_this_task_to_pcpu(uint32_t pcpu)
+{
+ cpu_set_t mask;
+ int r;
+
+ CPU_ZERO(&mask);
+ CPU_SET(pcpu, &mask);
+ r = sched_setaffinity(0, sizeof(mask), &mask);
+ TEST_ASSERT(!r, "sched_setaffinity() failed for pCPU '%u'.\n", pcpu);
+}
+
+static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
+{
+ uint32_t pcpu = atoi_non_negative("CPU number", cpu_str);
+
+ TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask),
+ "Not allowed to run on pCPU '%d', check cgroups?\n", pcpu);
+ return pcpu;
+}
+
+void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
+ int nr_vcpus)
+{
+ cpu_set_t allowed_mask;
+ char *cpu, *cpu_list;
+ char delim[2] = ",";
+ int i, r;
+
+ cpu_list = strdup(pcpus_string);
+ TEST_ASSERT(cpu_list, "strdup() allocation failed.\n");
+
+ r = sched_getaffinity(0, sizeof(allowed_mask), &allowed_mask);
+ TEST_ASSERT(!r, "sched_getaffinity() failed");
+
+ cpu = strtok(cpu_list, delim);
+
+ /* 1. Get all pcpus for vcpus. */
+ for (i = 0; i < nr_vcpus; i++) {
+ TEST_ASSERT(cpu, "pCPU not provided for vCPU '%d'\n", i);
+ vcpu_to_pcpu[i] = parse_pcpu(cpu, &allowed_mask);
+ cpu = strtok(NULL, delim);
+ }
+
+ /* 2. Check if the main worker needs to be pinned. */
+ if (cpu) {
+ kvm_pin_this_task_to_pcpu(parse_pcpu(cpu, &allowed_mask));
+ cpu = strtok(NULL, delim);
+ }
+
+ TEST_ASSERT(!cpu, "pCPU list contains trailing garbage characters '%s'", cpu);
+ free(cpu_list);
+}
+
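A minimal sketch of how a test consumes this parser; the names below (NR_TEST_VCPUS, vcpu_worker) are placeholders, not part of this series. Note that sched_setaffinity(0, ...) affects only the calling task, so each vCPU worker must pin itself, as memstress does further down in this diff.

#define NR_TEST_VCPUS 4			/* placeholder vCPU count */

static uint32_t vcpu_to_pcpu[NR_TEST_VCPUS];

static void parse_pinning(const char *pcpu_list)
{
	/* e.g. "2,4,6,8,0": vCPU0..3 -> pCPU2,4,6,8; main task -> pCPU0 */
	kvm_parse_vcpu_pinning(pcpu_list, vcpu_to_pcpu, NR_TEST_VCPUS);
}

static void *vcpu_worker(void *data)
{
	int vcpu_idx = (int)(unsigned long)data;

	/* Each worker pins itself before doing any real work. */
	kvm_pin_this_task_to_pcpu(vcpu_to_pcpu[vcpu_idx]);
	/* ... run the vCPU ... */
	return NULL;
}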
/*
* Userspace Memory Region Find
*
@@ -1151,8 +1214,8 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
* TEST_ASSERT failure occurs for invalid input or no area of at least
* sz unallocated bytes >= vaddr_min is available.
*/
-static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
- vm_vaddr_t vaddr_min)
+vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
+ vm_vaddr_t vaddr_min)
{
uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;
@@ -1256,8 +1319,7 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
virt_pg_map(vm, vaddr, paddr);
- sparsebit_set(vm->vpages_mapped,
- vaddr >> vm->page_shift);
+ sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
}
return vaddr_start;
@@ -1330,6 +1392,8 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 	virt_pg_map(vm, vaddr, paddr);
+	sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
+
 	vaddr += page_size;
 	paddr += page_size;
 	}
}
@@ -2021,3 +2085,19 @@ void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
break;
}
}
+
+__weak void kvm_arch_vm_post_create(struct kvm_vm *vm)
+{
+}
+
+__weak void kvm_selftest_arch_init(void)
+{
+}
+
+void __attribute((constructor)) kvm_selftest_init(void)
+{
+ /* Tell stdout not to buffer its content. */
+ setbuf(stdout, NULL);
+
+ kvm_selftest_arch_init();
+}
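Because the hooks above are __weak, an architecture overrides them simply by providing a strong definition; no #ifdef is needed in common code. A sketch of such an override, matching the x86 kvm_arch_vm_post_create() that appears later in this diff:

void kvm_arch_vm_post_create(struct kvm_vm *vm)
{
	vm_create_irqchip(vm);
}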
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/memstress.c
index 9618b37c66f7..2de8a5d527b3 100644
--- a/tools/testing/selftests/kvm/lib/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/memstress.c
@@ -2,13 +2,15 @@
/*
* Copyright (C) 2020, Google LLC.
*/
+#define _GNU_SOURCE
+
#include <inttypes.h>
#include "kvm_util.h"
-#include "perf_test_util.h"
+#include "memstress.h"
#include "processor.h"
-struct perf_test_args perf_test_args;
+struct memstress_args memstress_args;
/*
* Guest virtual memory offset of the testing memory slot.
@@ -31,7 +33,7 @@ struct vcpu_thread {
static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS];
/* The function run by each vCPU thread, as provided by the test. */
-static void (*vcpu_thread_fn)(struct perf_test_vcpu_args *);
+static void (*vcpu_thread_fn)(struct memstress_vcpu_args *);
/* Set to true once all vCPU threads are up and running. */
static bool all_vcpu_threads_running;
@@ -42,14 +44,19 @@ static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
* Continuously write to the first 8 bytes of each page in the
* specified region.
*/
-void perf_test_guest_code(uint32_t vcpu_idx)
+void memstress_guest_code(uint32_t vcpu_idx)
{
- struct perf_test_args *pta = &perf_test_args;
- struct perf_test_vcpu_args *vcpu_args = &pta->vcpu_args[vcpu_idx];
+ struct memstress_args *args = &memstress_args;
+ struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx];
+ struct guest_random_state rand_state;
uint64_t gva;
uint64_t pages;
+ uint64_t addr;
+ uint64_t page;
int i;
+ rand_state = new_guest_random_state(args->random_seed + vcpu_idx);
+
gva = vcpu_args->gva;
pages = vcpu_args->pages;
@@ -58,9 +65,14 @@ void perf_test_guest_code(uint32_t vcpu_idx)
while (true) {
for (i = 0; i < pages; i++) {
- uint64_t addr = gva + (i * pta->guest_page_size);
+ if (args->random_access)
+ page = guest_random_u32(&rand_state) % pages;
+ else
+ page = i;
- if (i % pta->wr_fract == 0)
+ addr = gva + (page * args->guest_page_size);
+
+ if (guest_random_u32(&rand_state) % 100 < args->write_percent)
*(uint64_t *)addr = 0x0123456789ABCDEF;
else
READ_ONCE(*(uint64_t *)addr);
@@ -70,17 +82,17 @@ void perf_test_guest_code(uint32_t vcpu_idx)
}
}
-void perf_test_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
+void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
struct kvm_vcpu *vcpus[],
uint64_t vcpu_memory_bytes,
bool partition_vcpu_memory_access)
{
- struct perf_test_args *pta = &perf_test_args;
- struct perf_test_vcpu_args *vcpu_args;
+ struct memstress_args *args = &memstress_args;
+ struct memstress_vcpu_args *vcpu_args;
int i;
for (i = 0; i < nr_vcpus; i++) {
- vcpu_args = &pta->vcpu_args[i];
+ vcpu_args = &args->vcpu_args[i];
vcpu_args->vcpu = vcpus[i];
vcpu_args->vcpu_idx = i;
@@ -89,29 +101,29 @@ void perf_test_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
vcpu_args->gva = guest_test_virt_mem +
(i * vcpu_memory_bytes);
vcpu_args->pages = vcpu_memory_bytes /
- pta->guest_page_size;
- vcpu_args->gpa = pta->gpa + (i * vcpu_memory_bytes);
+ args->guest_page_size;
+ vcpu_args->gpa = args->gpa + (i * vcpu_memory_bytes);
} else {
vcpu_args->gva = guest_test_virt_mem;
vcpu_args->pages = (nr_vcpus * vcpu_memory_bytes) /
- pta->guest_page_size;
- vcpu_args->gpa = pta->gpa;
+ args->guest_page_size;
+ vcpu_args->gpa = args->gpa;
}
vcpu_args_set(vcpus[i], 1, i);
pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
i, vcpu_args->gpa, vcpu_args->gpa +
- (vcpu_args->pages * pta->guest_page_size));
+ (vcpu_args->pages * args->guest_page_size));
}
}
-struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
+struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
uint64_t vcpu_memory_bytes, int slots,
enum vm_mem_backing_src_type backing_src,
bool partition_vcpu_memory_access)
{
- struct perf_test_args *pta = &perf_test_args;
+ struct memstress_args *args = &memstress_args;
struct kvm_vm *vm;
uint64_t guest_num_pages, slot0_pages = 0;
uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
@@ -121,20 +133,20 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
/* By default vCPUs will write to memory. */
- pta->wr_fract = 1;
+ args->write_percent = 100;
/*
* Snapshot the non-huge page size. This is used by the guest code to
* access/dirty pages at the logging granularity.
*/
- pta->guest_page_size = vm_guest_mode_params[mode].page_size;
+ args->guest_page_size = vm_guest_mode_params[mode].page_size;
guest_num_pages = vm_adjust_num_guest_pages(mode,
- (nr_vcpus * vcpu_memory_bytes) / pta->guest_page_size);
+ (nr_vcpus * vcpu_memory_bytes) / args->guest_page_size);
TEST_ASSERT(vcpu_memory_bytes % getpagesize() == 0,
"Guest memory size is not host page size aligned.");
- TEST_ASSERT(vcpu_memory_bytes % pta->guest_page_size == 0,
+ TEST_ASSERT(vcpu_memory_bytes % args->guest_page_size == 0,
"Guest memory size is not guest page size aligned.");
TEST_ASSERT(guest_num_pages % slots == 0,
"Guest memory cannot be evenly divided into %d slots.",
@@ -144,8 +156,8 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
* If using nested, allocate extra pages for the nested page tables and
* in-memory data structures.
*/
- if (pta->nested)
- slot0_pages += perf_test_nested_pages(nr_vcpus);
+ if (args->nested)
+ slot0_pages += memstress_nested_pages(nr_vcpus);
/*
* Pass guest_num_pages to populate the page tables for test memory.
@@ -153,9 +165,9 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
 * effect as KVM allows aliasing HVAs in memslots.
*/
vm = __vm_create_with_vcpus(mode, nr_vcpus, slot0_pages + guest_num_pages,
- perf_test_guest_code, vcpus);
+ memstress_guest_code, vcpus);
- pta->vm = vm;
+ args->vm = vm;
/* Put the test region at the top guest physical memory. */
region_end_gfn = vm->max_gfn + 1;
@@ -165,8 +177,8 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
* When running vCPUs in L2, restrict the test region to 48 bits to
* avoid needing 5-level page tables to identity map L2.
*/
- if (pta->nested)
- region_end_gfn = min(region_end_gfn, (1UL << 48) / pta->guest_page_size);
+ if (args->nested)
+ region_end_gfn = min(region_end_gfn, (1UL << 48) / args->guest_page_size);
#endif
/*
* If there should be more memory in the guest test region than there
@@ -178,63 +190,72 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
" nr_vcpus: %d wss: %" PRIx64 "]\n",
guest_num_pages, region_end_gfn - 1, nr_vcpus, vcpu_memory_bytes);
- pta->gpa = (region_end_gfn - guest_num_pages - 1) * pta->guest_page_size;
- pta->gpa = align_down(pta->gpa, backing_src_pagesz);
+ args->gpa = (region_end_gfn - guest_num_pages - 1) * args->guest_page_size;
+ args->gpa = align_down(args->gpa, backing_src_pagesz);
#ifdef __s390x__
/* Align to 1M (segment size) */
- pta->gpa = align_down(pta->gpa, 1 << 20);
+ args->gpa = align_down(args->gpa, 1 << 20);
#endif
- pta->size = guest_num_pages * pta->guest_page_size;
+ args->size = guest_num_pages * args->guest_page_size;
pr_info("guest physical test memory: [0x%lx, 0x%lx)\n",
- pta->gpa, pta->gpa + pta->size);
+ args->gpa, args->gpa + args->size);
/* Add extra memory slots for testing */
for (i = 0; i < slots; i++) {
uint64_t region_pages = guest_num_pages / slots;
- vm_paddr_t region_start = pta->gpa + region_pages * pta->guest_page_size * i;
+ vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i;
vm_userspace_mem_region_add(vm, backing_src, region_start,
- PERF_TEST_MEM_SLOT_INDEX + i,
+ MEMSTRESS_MEM_SLOT_INDEX + i,
region_pages, 0);
}
/* Do mapping for the demand paging memory slot */
- virt_map(vm, guest_test_virt_mem, pta->gpa, guest_num_pages);
+ virt_map(vm, guest_test_virt_mem, args->gpa, guest_num_pages);
- perf_test_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes,
+ memstress_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes,
partition_vcpu_memory_access);
- if (pta->nested) {
+ if (args->nested) {
pr_info("Configuring vCPUs to run in L2 (nested).\n");
- perf_test_setup_nested(vm, nr_vcpus, vcpus);
+ memstress_setup_nested(vm, nr_vcpus, vcpus);
}
- ucall_init(vm, NULL);
-
/* Export the shared variables to the guest. */
- sync_global_to_guest(vm, perf_test_args);
+ sync_global_to_guest(vm, memstress_args);
return vm;
}
-void perf_test_destroy_vm(struct kvm_vm *vm)
+void memstress_destroy_vm(struct kvm_vm *vm)
{
- ucall_uninit(vm);
kvm_vm_free(vm);
}
-void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract)
+void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent)
+{
+ memstress_args.write_percent = write_percent;
+ sync_global_to_guest(vm, memstress_args.write_percent);
+}
+
+void memstress_set_random_seed(struct kvm_vm *vm, uint32_t random_seed)
+{
+ memstress_args.random_seed = random_seed;
+ sync_global_to_guest(vm, memstress_args.random_seed);
+}
+
+void memstress_set_random_access(struct kvm_vm *vm, bool random_access)
{
- perf_test_args.wr_fract = wr_fract;
- sync_global_to_guest(vm, perf_test_args);
+ memstress_args.random_access = random_access;
+ sync_global_to_guest(vm, memstress_args.random_access);
}
-uint64_t __weak perf_test_nested_pages(int nr_vcpus)
+uint64_t __weak memstress_nested_pages(int nr_vcpus)
{
return 0;
}
-void __weak perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus)
+void __weak memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus)
{
pr_info("%s() not support on this architecture, skipping.\n", __func__);
exit(KSFT_SKIP);
@@ -243,6 +264,10 @@ void __weak perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_v
static void *vcpu_thread_main(void *data)
{
struct vcpu_thread *vcpu = data;
+ int vcpu_idx = vcpu->vcpu_idx;
+
+ if (memstress_args.pin_vcpus)
+ kvm_pin_this_task_to_pcpu(memstress_args.vcpu_to_pcpu[vcpu_idx]);
WRITE_ONCE(vcpu->running, true);
@@ -255,13 +280,13 @@ static void *vcpu_thread_main(void *data)
while (!READ_ONCE(all_vcpu_threads_running))
;
- vcpu_thread_fn(&perf_test_args.vcpu_args[vcpu->vcpu_idx]);
+ vcpu_thread_fn(&memstress_args.vcpu_args[vcpu_idx]);
return NULL;
}
-void perf_test_start_vcpu_threads(int nr_vcpus,
- void (*vcpu_fn)(struct perf_test_vcpu_args *))
+void memstress_start_vcpu_threads(int nr_vcpus,
+ void (*vcpu_fn)(struct memstress_vcpu_args *))
{
int i;
@@ -285,7 +310,7 @@ void perf_test_start_vcpu_threads(int nr_vcpus,
WRITE_ONCE(all_vcpu_threads_running, true);
}
-void perf_test_join_vcpu_threads(int nr_vcpus)
+void memstress_join_vcpu_threads(int nr_vcpus)
{
int i;
diff --git a/tools/testing/selftests/kvm/lib/riscv/ucall.c b/tools/testing/selftests/kvm/lib/riscv/ucall.c
index 087b9740bc8f..9a3476a2dfca 100644
--- a/tools/testing/selftests/kvm/lib/riscv/ucall.c
+++ b/tools/testing/selftests/kvm/lib/riscv/ucall.c
@@ -10,11 +10,7 @@
#include "kvm_util.h"
#include "processor.h"
-void ucall_init(struct kvm_vm *vm, void *arg)
-{
-}
-
-void ucall_uninit(struct kvm_vm *vm)
+void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
}
@@ -44,47 +40,22 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
return ret;
}
-void ucall(uint64_t cmd, int nargs, ...)
+void ucall_arch_do_ucall(vm_vaddr_t uc)
{
- struct ucall uc = {
- .cmd = cmd,
- };
- va_list va;
- int i;
-
- nargs = min(nargs, UCALL_MAX_ARGS);
-
- va_start(va, nargs);
- for (i = 0; i < nargs; ++i)
- uc.args[i] = va_arg(va, uint64_t);
- va_end(va);
-
sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT,
KVM_RISCV_SELFTESTS_SBI_UCALL,
- (vm_vaddr_t)&uc, 0, 0, 0, 0, 0);
+ uc, 0, 0, 0, 0, 0);
}
-uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
+void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- struct ucall ucall = {};
-
- if (uc)
- memset(uc, 0, sizeof(*uc));
if (run->exit_reason == KVM_EXIT_RISCV_SBI &&
run->riscv_sbi.extension_id == KVM_RISCV_SELFTESTS_SBI_EXT) {
switch (run->riscv_sbi.function_id) {
case KVM_RISCV_SELFTESTS_SBI_UCALL:
- memcpy(&ucall,
- addr_gva2hva(vcpu->vm, run->riscv_sbi.args[0]),
- sizeof(ucall));
-
- vcpu_run_complete_io(vcpu);
- if (uc)
- memcpy(uc, &ucall, sizeof(ucall));
-
- break;
+ return (void *)run->riscv_sbi.args[0];
case KVM_RISCV_SELFTESTS_SBI_UNEXP:
vcpu_dump(stderr, vcpu, 2);
TEST_ASSERT(0, "Unexpected trap taken by guest");
@@ -93,6 +64,5 @@ uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
break;
}
}
-
- return ucall.cmd;
+ return NULL;
}
diff --git a/tools/testing/selftests/kvm/lib/s390x/ucall.c b/tools/testing/selftests/kvm/lib/s390x/ucall.c
index 73dc4e21190f..a7f02dc372cf 100644
--- a/tools/testing/selftests/kvm/lib/s390x/ucall.c
+++ b/tools/testing/selftests/kvm/lib/s390x/ucall.c
@@ -6,40 +6,19 @@
*/
#include "kvm_util.h"
-void ucall_init(struct kvm_vm *vm, void *arg)
+void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
}
-void ucall_uninit(struct kvm_vm *vm)
+void ucall_arch_do_ucall(vm_vaddr_t uc)
{
-}
-
-void ucall(uint64_t cmd, int nargs, ...)
-{
- struct ucall uc = {
- .cmd = cmd,
- };
- va_list va;
- int i;
-
- nargs = min(nargs, UCALL_MAX_ARGS);
-
- va_start(va, nargs);
- for (i = 0; i < nargs; ++i)
- uc.args[i] = va_arg(va, uint64_t);
- va_end(va);
-
/* Exit via DIAGNOSE 0x501 (normally used for breakpoints) */
- asm volatile ("diag 0,%0,0x501" : : "a"(&uc) : "memory");
+ asm volatile ("diag 0,%0,0x501" : : "a"(uc) : "memory");
}
-uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
+void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- struct ucall ucall = {};
-
- if (uc)
- memset(uc, 0, sizeof(*uc));
if (run->exit_reason == KVM_EXIT_S390_SIEIC &&
run->s390_sieic.icptcode == 4 &&
@@ -47,13 +26,7 @@ uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
(run->s390_sieic.ipb >> 16) == 0x501) {
int reg = run->s390_sieic.ipa & 0xf;
- memcpy(&ucall, addr_gva2hva(vcpu->vm, run->s.regs.gprs[reg]),
- sizeof(ucall));
-
- vcpu_run_complete_io(vcpu);
- if (uc)
- memcpy(uc, &ucall, sizeof(ucall));
+ return (void *)run->s.regs.gprs[reg];
}
-
- return ucall.cmd;
+ return NULL;
}
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index 6d23878bbfe1..5c22fa4c2825 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -18,6 +18,23 @@
#include "test_util.h"
/*
+ * Random number generator that is usable from guest code. This is the
+ * Park-Miller LCG using standard constants.
+ */
+
+struct guest_random_state new_guest_random_state(uint32_t seed)
+{
+ struct guest_random_state s = {.seed = seed};
+ return s;
+}
+
+uint32_t guest_random_u32(struct guest_random_state *state)
+{
+ state->seed = (uint64_t)state->seed * 48271 % ((uint32_t)(1 << 31) - 1);
+ return state->seed;
+}
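This is the "minstd" generator: x[n+1] = 48271 * x[n] mod (2^31 - 1). A quick host-side sanity check, using the widely cited reference value for this generator (10,000 steps from seed 1 yield 399268537); the harness around it is assumed:

#include <stdint.h>
#include <assert.h>

static void guest_random_self_test(void)
{
	struct guest_random_state s = new_guest_random_state(1);
	uint32_t val = 0;
	int i;

	for (i = 0; i < 10000; i++)
		val = guest_random_u32(&s);

	/* Reference value for minstd with multiplier 48271 and seed 1. */
	assert(val == 399268537);
}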
+
+/*
* Parses "[0-9]+[kmgt]?".
*/
size_t parse_size(const char *size)
@@ -334,3 +351,22 @@ long get_run_delay(void)
return val[1];
}
+
+int atoi_paranoid(const char *num_str)
+{
+ char *end_ptr;
+ long num;
+
+ errno = 0;
+ num = strtol(num_str, &end_ptr, 0);
+ TEST_ASSERT(!errno, "strtol(\"%s\") failed", num_str);
+ TEST_ASSERT(num_str != end_ptr,
+ "strtol(\"%s\") didn't find a valid integer.", num_str);
+ TEST_ASSERT(*end_ptr == '\0',
+ "strtol(\"%s\") failed to parse trailing characters \"%s\".",
+ num_str, end_ptr);
+ TEST_ASSERT(num >= INT_MIN && num <= INT_MAX,
+ "%ld not in range of [%d, %d]", num, INT_MIN, INT_MAX);
+
+ return num;
+}
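The atoi_positive() and atoi_non_negative() helpers used throughout this series are presumably thin wrappers over atoi_paranoid(); a plausible sketch (the exact assertion messages are assumptions):

static inline uint32_t atoi_positive(const char *name, const char *num_str)
{
	int num = atoi_paranoid(num_str);

	TEST_ASSERT(num > 0, "%s must be positive, got '%s'", name, num_str);
	return num;
}

static inline uint32_t atoi_non_negative(const char *name, const char *num_str)
{
	int num = atoi_paranoid(num_str);

	TEST_ASSERT(num >= 0, "%s can't be negative, got '%s'", name, num_str);
	return num;
}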
diff --git a/tools/testing/selftests/kvm/lib/ucall_common.c b/tools/testing/selftests/kvm/lib/ucall_common.c
new file mode 100644
index 000000000000..fcae96461e46
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/ucall_common.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "kvm_util.h"
+#include "linux/types.h"
+#include "linux/bitmap.h"
+#include "linux/atomic.h"
+
+struct ucall_header {
+ DECLARE_BITMAP(in_use, KVM_MAX_VCPUS);
+ struct ucall ucalls[KVM_MAX_VCPUS];
+};
+
+/*
+ * ucall_pool holds per-VM values (global data is duplicated by each VM), so
+ * it must not be accessed from host code.
+ */
+static struct ucall_header *ucall_pool;
+
+void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+{
+ struct ucall_header *hdr;
+ struct ucall *uc;
+ vm_vaddr_t vaddr;
+ int i;
+
+ vaddr = vm_vaddr_alloc(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR);
+ hdr = (struct ucall_header *)addr_gva2hva(vm, vaddr);
+ memset(hdr, 0, sizeof(*hdr));
+
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ uc = &hdr->ucalls[i];
+ uc->hva = uc;
+ }
+
+ write_guest_global(vm, ucall_pool, (struct ucall_header *)vaddr);
+
+ ucall_arch_init(vm, mmio_gpa);
+}
+
+static struct ucall *ucall_alloc(void)
+{
+ struct ucall *uc;
+ int i;
+
+ GUEST_ASSERT(ucall_pool);
+
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ if (!atomic_test_and_set_bit(i, ucall_pool->in_use)) {
+ uc = &ucall_pool->ucalls[i];
+ memset(uc->args, 0, sizeof(uc->args));
+ return uc;
+ }
+ }
+
+ GUEST_ASSERT(0);
+ return NULL;
+}
+
+static void ucall_free(struct ucall *uc)
+{
+ /* Beware, here be pointer arithmetic. */
+ clear_bit(uc - ucall_pool->ucalls, ucall_pool->in_use);
+}
+
+void ucall(uint64_t cmd, int nargs, ...)
+{
+ struct ucall *uc;
+ va_list va;
+ int i;
+
+ uc = ucall_alloc();
+
+ WRITE_ONCE(uc->cmd, cmd);
+
+ nargs = min(nargs, UCALL_MAX_ARGS);
+
+ va_start(va, nargs);
+ for (i = 0; i < nargs; ++i)
+ WRITE_ONCE(uc->args[i], va_arg(va, uint64_t));
+ va_end(va);
+
+ ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
+
+ ucall_free(uc);
+}
+
+uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
+{
+ struct ucall ucall;
+ void *addr;
+
+ if (!uc)
+ uc = &ucall;
+
+ addr = ucall_arch_get_ucall(vcpu);
+ if (addr) {
+ memcpy(uc, addr, sizeof(*uc));
+ vcpu_run_complete_io(vcpu);
+ } else {
+ memset(uc, 0, sizeof(*uc));
+ }
+
+ return uc->cmd;
+}
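A minimal sketch of the resulting guest/host flow; UCALL_SYNC and UCALL_DONE are the library's standard commands, and the surrounding test scaffolding is elided:

/* Guest side: each ucall() transiently claims one pool slot. */
static void guest_code(void)
{
	ucall(UCALL_SYNC, 2, 0xdeadbeef, 0xcafe);	/* lands in uc.args[] */
	ucall(UCALL_DONE, 0);
}

/* Host side: decode the exit via the arch hook, copy out the ucall. */
static void handle_exits(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);
		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			/* uc.args[0] == 0xdeadbeef, uc.args[1] == 0xcafe */
			continue;
		case UCALL_DONE:
			return;
		default:
			TEST_FAIL("Unexpected ucall %lu", uc.cmd);
		}
	}
}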
diff --git a/tools/testing/selftests/kvm/lib/x86_64/hyperv.c b/tools/testing/selftests/kvm/lib/x86_64/hyperv.c
new file mode 100644
index 000000000000..efb7e7a1354d
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/x86_64/hyperv.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Hyper-V specific functions.
+ *
+ * Copyright (C) 2021, Red Hat Inc.
+ */
+#include <stdint.h>
+#include "processor.h"
+#include "hyperv.h"
+
+struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
+ vm_vaddr_t *p_hv_pages_gva)
+{
+ vm_vaddr_t hv_pages_gva = vm_vaddr_alloc_page(vm);
+ struct hyperv_test_pages *hv = addr_gva2hva(vm, hv_pages_gva);
+
+ /* Setup of a region of guest memory for the VP Assist page. */
+ hv->vp_assist = (void *)vm_vaddr_alloc_page(vm);
+ hv->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->vp_assist);
+ hv->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->vp_assist);
+
+ /* Setup of a region of guest memory for the partition assist page. */
+ hv->partition_assist = (void *)vm_vaddr_alloc_page(vm);
+ hv->partition_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->partition_assist);
+ hv->partition_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->partition_assist);
+
+ /* Setup of a region of guest memory for the enlightened VMCS. */
+ hv->enlightened_vmcs = (void *)vm_vaddr_alloc_page(vm);
+ hv->enlightened_vmcs_hva = addr_gva2hva(vm, (uintptr_t)hv->enlightened_vmcs);
+ hv->enlightened_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)hv->enlightened_vmcs);
+
+ *p_hv_pages_gva = hv_pages_gva;
+ return hv;
+}
+
+int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
+{
+ uint64_t val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
+ HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
+
+ wrmsr(HV_X64_MSR_VP_ASSIST_PAGE, val);
+
+ current_vp_assist = vp_assist;
+
+ return 0;
+}
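A guest-side sketch of the assumed usage, mirroring what load_vmcs() used to do with the now-removed vmx_pages fields (see the vmx.c hunk below); error handling is elided and both helpers are taken to return 0 on success, per the call sites in this diff:

static void l1_guest_code(struct hyperv_test_pages *hv)
{
	GUEST_ASSERT(!enable_vp_assist(hv->vp_assist_gpa, hv->vp_assist));
	GUEST_ASSERT(!evmcs_vmptrld(hv->enlightened_vmcs_gpa,
				    hv->enlightened_vmcs));
}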
diff --git a/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c b/tools/testing/selftests/kvm/lib/x86_64/memstress.c
index 0f344a7c89c4..d61e623afc8c 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/memstress.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * x86_64-specific extensions to perf_test_util.c.
+ * x86_64-specific extensions to memstress.c.
*
* Copyright (C) 2022, Google, Inc.
*/
@@ -11,25 +11,25 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "perf_test_util.h"
+#include "memstress.h"
#include "processor.h"
#include "vmx.h"
-void perf_test_l2_guest_code(uint64_t vcpu_id)
+void memstress_l2_guest_code(uint64_t vcpu_id)
{
- perf_test_guest_code(vcpu_id);
+ memstress_guest_code(vcpu_id);
vmcall();
}
-extern char perf_test_l2_guest_entry[];
+extern char memstress_l2_guest_entry[];
__asm__(
-"perf_test_l2_guest_entry:"
+"memstress_l2_guest_entry:"
" mov (%rsp), %rdi;"
-" call perf_test_l2_guest_code;"
+" call memstress_l2_guest_code;"
" ud2;"
);
-static void perf_test_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
+static void memstress_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
@@ -42,14 +42,14 @@ static void perf_test_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
*rsp = vcpu_id;
- prepare_vmcs(vmx, perf_test_l2_guest_entry, rsp);
+ prepare_vmcs(vmx, memstress_l2_guest_entry, rsp);
GUEST_ASSERT(!vmlaunch());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
GUEST_DONE();
}
-uint64_t perf_test_nested_pages(int nr_vcpus)
+uint64_t memstress_nested_pages(int nr_vcpus)
{
/*
* 513 page tables is enough to identity-map 256 TiB of L2 with 1G
@@ -59,7 +59,7 @@ uint64_t perf_test_nested_pages(int nr_vcpus)
return 513 + 10 * nr_vcpus;
}
-void perf_test_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
+void memstress_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
{
uint64_t start, end;
@@ -72,12 +72,12 @@ void perf_test_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
*/
nested_identity_map_1g(vmx, vm, 0, 0x100000000ULL);
- start = align_down(perf_test_args.gpa, PG_SIZE_1G);
- end = align_up(perf_test_args.gpa + perf_test_args.size, PG_SIZE_1G);
+ start = align_down(memstress_args.gpa, PG_SIZE_1G);
+ end = align_up(memstress_args.gpa + memstress_args.size, PG_SIZE_1G);
nested_identity_map_1g(vmx, vm, start, end - start);
}
-void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
+void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
{
struct vmx_pages *vmx, *vmx0 = NULL;
struct kvm_regs regs;
@@ -85,12 +85,13 @@ void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vc
int vcpu_id;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+ TEST_REQUIRE(kvm_cpu_has_ept());
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
vmx = vcpu_alloc_vmx(vm, &vmx_gva);
if (vcpu_id == 0) {
- perf_test_setup_ept(vmx, vm);
+ memstress_setup_ept(vmx, vm);
vmx0 = vmx;
} else {
/* Share the same EPT table across all vCPUs. */
@@ -100,11 +101,11 @@ void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vc
}
/*
- * Override the vCPU to run perf_test_l1_guest_code() which will
- * bounce it into L2 before calling perf_test_guest_code().
+ * Override the vCPU to run memstress_l1_guest_code() which will
+ * bounce it into L2 before calling memstress_guest_code().
*/
vcpu_regs_get(vcpus[vcpu_id], &regs);
- regs.rip = (unsigned long) perf_test_l1_guest_code;
+ regs.rip = (unsigned long) memstress_l1_guest_code;
vcpu_regs_set(vcpus[vcpu_id], &regs);
vcpu_args_set(vcpus[vcpu_id], 2, vmx_gva, vcpu_id);
}
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 39c4409ef56a..d532c20c74fd 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -131,23 +131,28 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
}
}
-static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr,
- int level)
+static void *virt_get_pte(struct kvm_vm *vm, uint64_t *parent_pte,
+ uint64_t vaddr, int level)
{
- uint64_t *page_table = addr_gpa2hva(vm, pt_pfn << vm->page_shift);
+ uint64_t pt_gpa = PTE_GET_PA(*parent_pte);
+ uint64_t *page_table = addr_gpa2hva(vm, pt_gpa);
int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
+ TEST_ASSERT((*parent_pte & PTE_PRESENT_MASK) || parent_pte == &vm->pgd,
+ "Parent PTE (level %d) not PRESENT for gva: 0x%08lx",
+ level + 1, vaddr);
+
return &page_table[index];
}
static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
- uint64_t pt_pfn,
+ uint64_t *parent_pte,
uint64_t vaddr,
uint64_t paddr,
int current_level,
int target_level)
{
- uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, current_level);
+ uint64_t *pte = virt_get_pte(vm, parent_pte, vaddr, current_level);
if (!(*pte & PTE_PRESENT_MASK)) {
*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
@@ -197,21 +202,20 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
* Allocate upper level page tables, if not already present. Return
* early if a hugepage was created.
*/
- pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift,
- vaddr, paddr, PG_LEVEL_512G, level);
+ pml4e = virt_create_upper_pte(vm, &vm->pgd, vaddr, paddr, PG_LEVEL_512G, level);
if (*pml4e & PTE_LARGE_MASK)
return;
- pdpe = virt_create_upper_pte(vm, PTE_GET_PFN(*pml4e), vaddr, paddr, PG_LEVEL_1G, level);
+ pdpe = virt_create_upper_pte(vm, pml4e, vaddr, paddr, PG_LEVEL_1G, level);
if (*pdpe & PTE_LARGE_MASK)
return;
- pde = virt_create_upper_pte(vm, PTE_GET_PFN(*pdpe), vaddr, paddr, PG_LEVEL_2M, level);
+ pde = virt_create_upper_pte(vm, pdpe, vaddr, paddr, PG_LEVEL_2M, level);
if (*pde & PTE_LARGE_MASK)
return;
/* Fill in page table entry. */
- pte = virt_get_pte(vm, PTE_GET_PFN(*pde), vaddr, PG_LEVEL_4K);
+ pte = virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K);
TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
"PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
@@ -241,30 +245,25 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
}
}
-static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm,
- struct kvm_vcpu *vcpu,
- uint64_t vaddr)
+static bool vm_is_target_pte(uint64_t *pte, int *level, int current_level)
{
- uint16_t index[4];
- uint64_t *pml4e, *pdpe, *pde;
- uint64_t *pte;
- struct kvm_sregs sregs;
- uint64_t rsvd_mask = 0;
+ if (*pte & PTE_LARGE_MASK) {
+ TEST_ASSERT(*level == PG_LEVEL_NONE ||
+ *level == current_level,
+ "Unexpected hugepage at level %d\n", current_level);
+ *level = current_level;
+ }
- /* Set the high bits in the reserved mask. */
- if (vm->pa_bits < 52)
- rsvd_mask = GENMASK_ULL(51, vm->pa_bits);
+ return *level == current_level;
+}
- /*
- * SDM vol 3, fig 4-11 "Formats of CR3 and Paging-Structure Entries
- * with 4-Level Paging and 5-Level Paging".
- * If IA32_EFER.NXE = 0 and the P flag of a paging-structure entry is 1,
- * the XD flag (bit 63) is reserved.
- */
- vcpu_sregs_get(vcpu, &sregs);
- if ((sregs.efer & EFER_NX) == 0) {
- rsvd_mask |= PTE_NX_MASK;
- }
+uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
+ int *level)
+{
+ uint64_t *pml4e, *pdpe, *pde;
+
+ TEST_ASSERT(*level >= PG_LEVEL_NONE && *level < PG_LEVEL_NUM,
+ "Invalid PG_LEVEL_* '%d'", *level);
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
@@ -279,54 +278,26 @@ static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm,
TEST_ASSERT(vaddr == (((int64_t)vaddr << 16) >> 16),
"Canonical check failed. The virtual address is invalid.");
- index[0] = (vaddr >> 12) & 0x1ffu;
- index[1] = (vaddr >> 21) & 0x1ffu;
- index[2] = (vaddr >> 30) & 0x1ffu;
- index[3] = (vaddr >> 39) & 0x1ffu;
+ pml4e = virt_get_pte(vm, &vm->pgd, vaddr, PG_LEVEL_512G);
+ if (vm_is_target_pte(pml4e, level, PG_LEVEL_512G))
+ return pml4e;
- pml4e = addr_gpa2hva(vm, vm->pgd);
- TEST_ASSERT(pml4e[index[3]] & PTE_PRESENT_MASK,
- "Expected pml4e to be present for gva: 0x%08lx", vaddr);
- TEST_ASSERT((pml4e[index[3]] & (rsvd_mask | PTE_LARGE_MASK)) == 0,
- "Unexpected reserved bits set.");
+ pdpe = virt_get_pte(vm, pml4e, vaddr, PG_LEVEL_1G);
+ if (vm_is_target_pte(pdpe, level, PG_LEVEL_1G))
+ return pdpe;
- pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size);
- TEST_ASSERT(pdpe[index[2]] & PTE_PRESENT_MASK,
- "Expected pdpe to be present for gva: 0x%08lx", vaddr);
- TEST_ASSERT(!(pdpe[index[2]] & PTE_LARGE_MASK),
- "Expected pdpe to map a pde not a 1-GByte page.");
- TEST_ASSERT((pdpe[index[2]] & rsvd_mask) == 0,
- "Unexpected reserved bits set.");
+ pde = virt_get_pte(vm, pdpe, vaddr, PG_LEVEL_2M);
+ if (vm_is_target_pte(pde, level, PG_LEVEL_2M))
+ return pde;
- pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size);
- TEST_ASSERT(pde[index[1]] & PTE_PRESENT_MASK,
- "Expected pde to be present for gva: 0x%08lx", vaddr);
- TEST_ASSERT(!(pde[index[1]] & PTE_LARGE_MASK),
- "Expected pde to map a pte not a 2-MByte page.");
- TEST_ASSERT((pde[index[1]] & rsvd_mask) == 0,
- "Unexpected reserved bits set.");
-
- pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size);
- TEST_ASSERT(pte[index[0]] & PTE_PRESENT_MASK,
- "Expected pte to be present for gva: 0x%08lx", vaddr);
-
- return &pte[index[0]];
+ return virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K);
}
-uint64_t vm_get_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
- uint64_t vaddr)
+uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr)
{
- uint64_t *pte = _vm_get_page_table_entry(vm, vcpu, vaddr);
+ int level = PG_LEVEL_4K;
- return *(uint64_t *)pte;
-}
-
-void vm_set_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
- uint64_t vaddr, uint64_t pte)
-{
- uint64_t *new_pte = _vm_get_page_table_entry(vm, vcpu, vaddr);
-
- *(uint64_t *)new_pte = pte;
+ return __vm_get_page_table_entry(vm, vaddr, &level);
}
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
@@ -512,41 +483,17 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
- uint16_t index[4];
- uint64_t *pml4e, *pdpe, *pde;
- uint64_t *pte;
-
- TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
- "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
-
- index[0] = (gva >> 12) & 0x1ffu;
- index[1] = (gva >> 21) & 0x1ffu;
- index[2] = (gva >> 30) & 0x1ffu;
- index[3] = (gva >> 39) & 0x1ffu;
+ int level = PG_LEVEL_NONE;
+ uint64_t *pte = __vm_get_page_table_entry(vm, gva, &level);
- if (!vm->pgd_created)
- goto unmapped_gva;
- pml4e = addr_gpa2hva(vm, vm->pgd);
- if (!(pml4e[index[3]] & PTE_PRESENT_MASK))
- goto unmapped_gva;
+ TEST_ASSERT(*pte & PTE_PRESENT_MASK,
+ "Leaf PTE not PRESENT for gva: 0x%08lx", gva);
- pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size);
- if (!(pdpe[index[2]] & PTE_PRESENT_MASK))
- goto unmapped_gva;
-
- pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size);
- if (!(pde[index[1]] & PTE_PRESENT_MASK))
- goto unmapped_gva;
-
- pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size);
- if (!(pte[index[0]] & PTE_PRESENT_MASK))
- goto unmapped_gva;
-
- return (PTE_GET_PFN(pte[index[0]]) * vm->page_size) + (gva & ~PAGE_MASK);
-
-unmapped_gva:
- TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
- exit(EXIT_FAILURE);
+ /*
+ * No need for a hugepage mask on the PTE, x86-64 requires the "unused"
+ * address bits to be zero.
+ */
+ return PTE_GET_PA(*pte) | (gva & ~HUGEPAGE_MASK(level));
}
static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt)
@@ -639,6 +586,11 @@ void __vm_xsave_require_permission(int bit, const char *name)
bitmask);
}
+void kvm_arch_vm_post_create(struct kvm_vm *vm)
+{
+ vm_create_irqchip(vm);
+}
+
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
void *guest_code)
{
@@ -700,8 +652,9 @@ const struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
return cpuid;
}
-bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
- struct kvm_x86_cpu_feature feature)
+static uint32_t __kvm_cpu_has(const struct kvm_cpuid2 *cpuid,
+ uint32_t function, uint32_t index,
+ uint8_t reg, uint8_t lo, uint8_t hi)
{
const struct kvm_cpuid_entry2 *entry;
int i;
@@ -714,12 +667,25 @@ bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
* order, but kvm_x86_cpu_feature matches that mess, so yay
* pointer shenanigans!
*/
- if (entry->function == feature.function &&
- entry->index == feature.index)
- return (&entry->eax)[feature.reg] & BIT(feature.bit);
+ if (entry->function == function && entry->index == index)
+ return ((&entry->eax)[reg] & GENMASK(hi, lo)) >> lo;
}
- return false;
+ return 0;
+}
+
+bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
+ struct kvm_x86_cpu_feature feature)
+{
+ return __kvm_cpu_has(cpuid, feature.function, feature.index,
+ feature.reg, feature.bit, feature.bit);
+}
+
+uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
+ struct kvm_x86_cpu_property property)
+{
+ return __kvm_cpu_has(cpuid, property.function, property.index,
+ property.reg, property.lo_bit, property.hi_bit);
}
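X86_PROPERTY_* presumably packs a multi-bit CPUID field the same way kvm_x86_cpu_feature packs a single bit; a sketch of the shape (the struct layout and macro are assumptions inferred from __kvm_cpu_has() above):

struct kvm_x86_cpu_property {
	uint32_t function;
	uint8_t index;
	uint8_t reg;	/* 0 = EAX ... 3 = EDX; used as (&entry->eax)[reg] */
	uint8_t lo_bit;
	uint8_t hi_bit;
};

/* e.g. MAXPHYADDR is CPUID.0x80000008:EAX[7:0], per the code it replaces. */
#define X86_PROPERTY_MAX_PHY_ADDR \
	((struct kvm_x86_cpu_property){ 0x80000008, 0, 0, 0, 7 })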
uint64_t kvm_get_feature_msr(uint64_t msr_index)
@@ -1059,34 +1025,15 @@ bool is_amd_cpu(void)
void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
{
- const struct kvm_cpuid_entry2 *entry;
- bool pae;
-
- /* SDM 4.1.4 */
- if (kvm_get_cpuid_max_extended() < 0x80000008) {
- pae = kvm_get_supported_cpuid_entry(1)->edx & (1 << 6);
- *pa_bits = pae ? 36 : 32;
+ if (!kvm_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) {
+ *pa_bits = kvm_cpu_has(X86_FEATURE_PAE) ? 36 : 32;
*va_bits = 32;
} else {
- entry = kvm_get_supported_cpuid_entry(0x80000008);
- *pa_bits = entry->eax & 0xff;
- *va_bits = (entry->eax >> 8) & 0xff;
+ *pa_bits = kvm_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
+ *va_bits = kvm_cpu_property(X86_PROPERTY_MAX_VIRT_ADDR);
}
}
-struct idt_entry {
- uint16_t offset0;
- uint16_t selector;
- uint16_t ist : 3;
- uint16_t : 5;
- uint16_t type : 4;
- uint16_t : 1;
- uint16_t dpl : 2;
- uint16_t p : 1;
- uint16_t offset1;
- uint32_t offset2; uint32_t reserved;
-};
-
static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
int dpl, unsigned short selector)
{
@@ -1116,6 +1063,7 @@ static bool kvm_fixup_exception(struct ex_regs *regs)
regs->rip = regs->r11;
regs->r9 = regs->vector;
+ regs->r10 = regs->error_code;
return true;
}
@@ -1278,7 +1226,7 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
{
const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
unsigned long ht_gfn, max_gfn, max_pfn;
- uint32_t eax, ebx, ecx, edx, max_ext_leaf;
+ uint8_t maxphyaddr;
max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;
@@ -1292,8 +1240,7 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
/* Before family 17h, the HyperTransport area is just below 1T. */
ht_gfn = (1 << 28) - num_ht_pages;
- cpuid(1, &eax, &ebx, &ecx, &edx);
- if (x86_family(eax) < 0x17)
+ if (this_cpu_family() < 0x17)
goto done;
/*
@@ -1301,17 +1248,14 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
* reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX. Use
* the old conservative value if MAXPHYADDR is not enumerated.
*/
- cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
- max_ext_leaf = eax;
- if (max_ext_leaf < 0x80000008)
+ if (!this_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR))
goto done;
- cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
- max_pfn = (1ULL << ((eax & 0xff) - vm->page_shift)) - 1;
- if (max_ext_leaf >= 0x8000001f) {
- cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
- max_pfn >>= (ebx >> 6) & 0x3f;
- }
+ maxphyaddr = this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
+ max_pfn = (1ULL << (maxphyaddr - vm->page_shift)) - 1;
+
+ if (this_cpu_has_p(X86_PROPERTY_PHYS_ADDR_REDUCTION))
+ max_pfn >>= this_cpu_property(X86_PROPERTY_PHYS_ADDR_REDUCTION);
ht_gfn = max_pfn - num_ht_pages;
done:
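With the walk unified, callers can fetch a PTE pointer and edit it in place; a sketch (assuming a 4K mapping already exists for the address, and that the guest flushes its TLB before relying on the change):

static void make_gva_read_only(struct kvm_vm *vm, uint64_t gva)
{
	uint64_t *pte = vm_get_page_table_entry(vm, gva);

	*pte &= ~PTE_WRITABLE_MASK;
}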
diff --git a/tools/testing/selftests/kvm/lib/x86_64/ucall.c b/tools/testing/selftests/kvm/lib/x86_64/ucall.c
index e5f0f9e0d3ee..4d41dc63cc9e 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/ucall.c
@@ -8,52 +8,25 @@
#define UCALL_PIO_PORT ((uint16_t)0x1000)
-void ucall_init(struct kvm_vm *vm, void *arg)
+void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
}
-void ucall_uninit(struct kvm_vm *vm)
+void ucall_arch_do_ucall(vm_vaddr_t uc)
{
-}
-
-void ucall(uint64_t cmd, int nargs, ...)
-{
- struct ucall uc = {
- .cmd = cmd,
- };
- va_list va;
- int i;
-
- nargs = min(nargs, UCALL_MAX_ARGS);
-
- va_start(va, nargs);
- for (i = 0; i < nargs; ++i)
- uc.args[i] = va_arg(va, uint64_t);
- va_end(va);
-
asm volatile("in %[port], %%al"
- : : [port] "d" (UCALL_PIO_PORT), "D" (&uc) : "rax", "memory");
+ : : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax", "memory");
}
-uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
+void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- struct ucall ucall = {};
-
- if (uc)
- memset(uc, 0, sizeof(*uc));
if (run->exit_reason == KVM_EXIT_IO && run->io.port == UCALL_PIO_PORT) {
struct kvm_regs regs;
vcpu_regs_get(vcpu, &regs);
- memcpy(&ucall, addr_gva2hva(vcpu->vm, (vm_vaddr_t)regs.rdi),
- sizeof(ucall));
-
- vcpu_run_complete_io(vcpu);
- if (uc)
- memcpy(uc, &ucall, sizeof(ucall));
+ return (void *)regs.rdi;
}
-
- return ucall.cmd;
+ return NULL;
}
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index d21049c38fc5..59d97531c9b1 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -109,18 +109,6 @@ vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
memset(vmx->vmwrite_hva, 0, getpagesize());
- /* Setup of a region of guest memory for the VP Assist page. */
- vmx->vp_assist = (void *)vm_vaddr_alloc_page(vm);
- vmx->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)vmx->vp_assist);
- vmx->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vp_assist);
-
- /* Setup of a region of guest memory for the enlightened VMCS. */
- vmx->enlightened_vmcs = (void *)vm_vaddr_alloc_page(vm);
- vmx->enlightened_vmcs_hva =
- addr_gva2hva(vm, (uintptr_t)vmx->enlightened_vmcs);
- vmx->enlightened_vmcs_gpa =
- addr_gva2gpa(vm, (uintptr_t)vmx->enlightened_vmcs);
-
*p_vmx_gva = vmx_gva;
return vmx;
}
@@ -171,26 +159,18 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx)
bool load_vmcs(struct vmx_pages *vmx)
{
- if (!enable_evmcs) {
- /* Load a VMCS. */
- *(uint32_t *)(vmx->vmcs) = vmcs_revision();
- if (vmclear(vmx->vmcs_gpa))
- return false;
-
- if (vmptrld(vmx->vmcs_gpa))
- return false;
-
- /* Setup shadow VMCS, do not load it yet. */
- *(uint32_t *)(vmx->shadow_vmcs) =
- vmcs_revision() | 0x80000000ul;
- if (vmclear(vmx->shadow_vmcs_gpa))
- return false;
- } else {
- if (evmcs_vmptrld(vmx->enlightened_vmcs_gpa,
- vmx->enlightened_vmcs))
- return false;
- current_evmcs->revision_id = EVMCS_VERSION;
- }
+ /* Load a VMCS. */
+ *(uint32_t *)(vmx->vmcs) = vmcs_revision();
+ if (vmclear(vmx->vmcs_gpa))
+ return false;
+
+ if (vmptrld(vmx->vmcs_gpa))
+ return false;
+
+ /* Setup shadow VMCS, do not load it yet. */
+ *(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul;
+ if (vmclear(vmx->shadow_vmcs_gpa))
+ return false;
return true;
}
@@ -544,26 +524,22 @@ void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
__nested_map(vmx, vm, addr, addr, size, PG_LEVEL_1G);
}
-bool kvm_vm_has_ept(struct kvm_vm *vm)
+bool kvm_cpu_has_ept(void)
{
- struct kvm_vcpu *vcpu;
uint64_t ctrl;
- vcpu = list_first_entry(&vm->vcpus, struct kvm_vcpu, list);
- TEST_ASSERT(vcpu, "Cannot determine EPT support without vCPUs.\n");
-
- ctrl = vcpu_get_msr(vcpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS) >> 32;
+ ctrl = kvm_get_feature_msr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) >> 32;
if (!(ctrl & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
return false;
- ctrl = vcpu_get_msr(vcpu, MSR_IA32_VMX_PROCBASED_CTLS2) >> 32;
+ ctrl = kvm_get_feature_msr(MSR_IA32_VMX_PROCBASED_CTLS2) >> 32;
return ctrl & SECONDARY_EXEC_ENABLE_EPT;
}
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t eptp_memslot)
{
- TEST_REQUIRE(kvm_vm_has_ept(vm));
+ TEST_ASSERT(kvm_cpu_has_ept(), "KVM doesn't support nested EPT");
vmx->eptp = (void *)vm_vaddr_alloc_page(vm);
vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
diff --git a/tools/testing/selftests/kvm/max_guest_memory_test.c b/tools/testing/selftests/kvm/max_guest_memory_test.c
index 9a6e4f3ad6b5..feaf2be20ff2 100644
--- a/tools/testing/selftests/kvm/max_guest_memory_test.c
+++ b/tools/testing/selftests/kvm/max_guest_memory_test.c
@@ -11,6 +11,7 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
+#include <linux/sizes.h>
#include "kvm_util.h"
#include "test_util.h"
@@ -162,8 +163,7 @@ int main(int argc, char *argv[])
* just below the 4gb boundary. This test could create memory at
 * 1gb-3gb, but it's simpler to skip straight to 4gb.
*/
- const uint64_t size_1gb = (1 << 30);
- const uint64_t start_gpa = (4ull * size_1gb);
+ const uint64_t start_gpa = SZ_4G;
const int first_slot = 1;
struct timespec time_start, time_run1, time_reset, time_run2;
@@ -180,29 +180,26 @@ int main(int argc, char *argv[])
* are quite common for x86, requires changing only max_mem (KVM allows
* 32k memslots, 32k * 2gb == ~64tb of guest memory).
*/
- slot_size = 2 * size_1gb;
+ slot_size = SZ_2G;
max_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
TEST_ASSERT(max_slots > first_slot, "KVM is broken");
/* All KVM MMUs should be able to survive a 128gb guest. */
- max_mem = 128 * size_1gb;
+ max_mem = 128ull * SZ_1G;
calc_default_nr_vcpus();
while ((opt = getopt(argc, argv, "c:h:m:s:H")) != -1) {
switch (opt) {
case 'c':
- nr_vcpus = atoi(optarg);
- TEST_ASSERT(nr_vcpus > 0, "number of vcpus must be >0");
+ nr_vcpus = atoi_positive("Number of vCPUs", optarg);
break;
case 'm':
- max_mem = atoi(optarg) * size_1gb;
- TEST_ASSERT(max_mem > 0, "memory size must be >0");
+ max_mem = 1ull * atoi_positive("Memory size", optarg) * SZ_1G;
break;
case 's':
- slot_size = atoi(optarg) * size_1gb;
- TEST_ASSERT(slot_size > 0, "slot size must be >0");
+ slot_size = 1ull * atoi_positive("Slot size", optarg) * SZ_1G;
break;
case 'H':
hugepages = true;
@@ -245,7 +242,7 @@ int main(int argc, char *argv[])
#ifdef __x86_64__
/* Identity map memory in the guest using 1gb pages. */
- for (i = 0; i < slot_size; i += size_1gb)
+ for (i = 0; i < slot_size; i += SZ_1G)
__virt_pg_map(vm, gpa + i, gpa + i, PG_LEVEL_1G);
#else
for (i = 0; i < slot_size; i += vm->page_size)
@@ -260,7 +257,7 @@ int main(int argc, char *argv[])
vcpus = NULL;
pr_info("Running with %lugb of guest memory and %u vCPUs\n",
- (gpa - start_gpa) / size_1gb, nr_vcpus);
+ (gpa - start_gpa) / SZ_1G, nr_vcpus);
rendezvous_with_vcpus(&time_start, "spawning");
rendezvous_with_vcpus(&time_run1, "run 1");
diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
index bb1d17a1171b..d07e921bfcc5 100644
--- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
@@ -21,7 +21,7 @@
#include <linux/bitops.h>
#include <linux/userfaultfd.h>
-#include "perf_test_util.h"
+#include "memstress.h"
#include "processor.h"
#include "test_util.h"
#include "guest_modes.h"
@@ -36,7 +36,7 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static bool run_vcpus = true;
-static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
+static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
struct kvm_vcpu *vcpu = vcpu_args->vcpu;
struct kvm_run *run;
@@ -72,10 +72,10 @@ static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
int i;
/*
- * Add the dummy memslot just below the perf_test_util memslot, which is
+ * Add the dummy memslot just below the memstress memslot, which is
* at the top of the guest physical address space.
*/
- gpa = perf_test_args.gpa - pages * vm->page_size;
+ gpa = memstress_args.gpa - pages * vm->page_size;
for (i = 0; i < nr_modifications; i++) {
usleep(delay);
@@ -87,8 +87,8 @@ static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
}
struct test_params {
- useconds_t memslot_modification_delay;
- uint64_t nr_memslot_modifications;
+ useconds_t delay;
+ uint64_t nr_iterations;
bool partition_vcpu_memory_access;
};
@@ -97,25 +97,24 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct test_params *p = arg;
struct kvm_vm *vm;
- vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
+ vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
VM_MEM_SRC_ANONYMOUS,
p->partition_vcpu_memory_access);
pr_info("Finished creating vCPUs\n");
- perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
+ memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
pr_info("Started all vCPUs\n");
- add_remove_memslot(vm, p->memslot_modification_delay,
- p->nr_memslot_modifications);
+ add_remove_memslot(vm, p->delay, p->nr_iterations);
run_vcpus = false;
- perf_test_join_vcpu_threads(nr_vcpus);
+ memstress_join_vcpu_threads(nr_vcpus);
pr_info("All vCPU threads joined\n");
- perf_test_destroy_vm(vm);
+ memstress_destroy_vm(vm);
}
static void help(char *name)
@@ -144,9 +143,8 @@ int main(int argc, char *argv[])
int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
int opt;
struct test_params p = {
- .memslot_modification_delay = 0,
- .nr_memslot_modifications =
- DEFAULT_MEMSLOT_MODIFICATION_ITERATIONS,
+ .delay = 0,
+ .nr_iterations = DEFAULT_MEMSLOT_MODIFICATION_ITERATIONS,
.partition_vcpu_memory_access = true
};
@@ -158,16 +156,14 @@ int main(int argc, char *argv[])
guest_modes_cmdline(optarg);
break;
case 'd':
- p.memslot_modification_delay = strtoul(optarg, NULL, 0);
- TEST_ASSERT(p.memslot_modification_delay >= 0,
- "A negative delay is not supported.");
+ p.delay = atoi_non_negative("Delay", optarg);
break;
case 'b':
guest_percpu_mem_size = parse_size(optarg);
break;
case 'v':
- nr_vcpus = atoi(optarg);
- TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
+ nr_vcpus = atoi_positive("Number of vCPUs", optarg);
+ TEST_ASSERT(nr_vcpus <= max_vcpus,
"Invalid number of vcpus, must be between 1 and %d",
max_vcpus);
break;
@@ -175,7 +171,7 @@ int main(int argc, char *argv[])
p.partition_vcpu_memory_access = false;
break;
case 'i':
- p.nr_memslot_modifications = atoi(optarg);
+ p.nr_iterations = atoi_positive("Number of iterations", optarg);
break;
case 'h':
default:
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
index 44995446d942..36b20abfb948 100644
--- a/tools/testing/selftests/kvm/memslot_perf_test.c
+++ b/tools/testing/selftests/kvm/memslot_perf_test.c
@@ -277,7 +277,6 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
TEST_ASSERT(data->hva_slots, "malloc() fail");
data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code);
- ucall_init(data->vm, NULL);
pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
max_mem_slots - 1, data->pages_per_slot, rempages);
@@ -885,40 +884,28 @@ static bool parse_args(int argc, char *argv[],
map_unmap_verify = true;
break;
case 's':
- targs->nslots = atoi(optarg);
+ targs->nslots = atoi_paranoid(optarg);
if (targs->nslots <= 0 && targs->nslots != -1) {
pr_info("Slot count cap has to be positive or -1 for no cap\n");
return false;
}
break;
case 'f':
- targs->tfirst = atoi(optarg);
- if (targs->tfirst < 0) {
- pr_info("First test to run has to be non-negative\n");
- return false;
- }
+ targs->tfirst = atoi_non_negative("First test", optarg);
break;
case 'e':
- targs->tlast = atoi(optarg);
- if (targs->tlast < 0 || targs->tlast >= NTESTS) {
+ targs->tlast = atoi_non_negative("Last test", optarg);
+ if (targs->tlast >= NTESTS) {
pr_info("Last test to run has to be non-negative and less than %zu\n",
NTESTS);
return false;
}
break;
case 'l':
- targs->seconds = atoi(optarg);
- if (targs->seconds < 0) {
- pr_info("Test length in seconds has to be non-negative\n");
- return false;
- }
+ targs->seconds = atoi_non_negative("Test length", optarg);
break;
case 'r':
- targs->runs = atoi(optarg);
- if (targs->runs <= 0) {
- pr_info("Runs per test has to be positive\n");
- return false;
- }
+ targs->runs = atoi_positive("Runs per test", optarg);
break;
}
}
@@ -1007,9 +994,6 @@ int main(int argc, char *argv[])
struct test_result rbestslottime;
int tctr;
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
if (!parse_args(argc, argv, &targs))
return -1;
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
index 6f88da7e60be..3045fdf9bdf5 100644
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -205,9 +205,6 @@ int main(int argc, char *argv[])
struct kvm_vcpu *vcpu;
u32 cpu, rseq_cpu;
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
r = sched_getaffinity(0, sizeof(possible_mask), &possible_mask);
TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
strerror(errno));
@@ -224,7 +221,6 @@ int main(int argc, char *argv[])
* CPU affinity.
*/
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- ucall_init(vm, NULL);
pthread_create(&migration_thread, NULL, migration_worker,
(void *)(unsigned long)syscall(SYS_gettid));
diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c
index 9113696d5178..3fd81e58f40c 100644
--- a/tools/testing/selftests/kvm/s390x/memop.c
+++ b/tools/testing/selftests/kvm/s390x/memop.c
@@ -760,8 +760,6 @@ int main(int argc, char *argv[])
TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_MEM_OP));
- setbuf(stdout, NULL); /* Tell stdout not to buffer its content */
-
ksft_print_header();
ksft_set_plan(ARRAY_SIZE(testlist));
diff --git a/tools/testing/selftests/kvm/s390x/resets.c b/tools/testing/selftests/kvm/s390x/resets.c
index 19486084eb30..e41e2cb8ffa9 100644
--- a/tools/testing/selftests/kvm/s390x/resets.c
+++ b/tools/testing/selftests/kvm/s390x/resets.c
@@ -296,8 +296,6 @@ int main(int argc, char *argv[])
bool has_s390_vcpu_resets = kvm_check_cap(KVM_CAP_S390_VCPU_RESETS);
int idx;
- setbuf(stdout, NULL); /* Tell stdout not to buffer its content */
-
ksft_print_header();
ksft_set_plan(ARRAY_SIZE(testlist));
diff --git a/tools/testing/selftests/kvm/s390x/sync_regs_test.c b/tools/testing/selftests/kvm/s390x/sync_regs_test.c
index 3fdb6e2598eb..2ddde41c44ba 100644
--- a/tools/testing/selftests/kvm/s390x/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/s390x/sync_regs_test.c
@@ -231,9 +231,6 @@ int main(int argc, char *argv[])
TEST_REQUIRE(kvm_has_cap(KVM_CAP_SYNC_REGS));
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
ksft_print_header();
ksft_set_plan(ARRAY_SIZE(testlist));
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index 0d55f508d595..2ef1d1b72ce4 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -392,9 +392,6 @@ int main(int argc, char *argv[])
int i, loops;
#endif
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
#ifdef __x86_64__
/*
* FIXME: the zero-memslot test fails on aarch64 and s390x because
@@ -407,7 +404,7 @@ int main(int argc, char *argv[])
#ifdef __x86_64__
if (argc > 1)
- loops = atoi(argv[1]);
+ loops = atoi_positive("Number of iterations", argv[1]);
else
loops = 10;
diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
index db8967f1a17b..c87f38712073 100644
--- a/tools/testing/selftests/kvm/steal_time.c
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -266,7 +266,6 @@ int main(int ac, char **av)
gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages);
- ucall_init(vm, NULL);
TEST_REQUIRE(is_steal_time_supported(vcpus[0]));
diff --git a/tools/testing/selftests/kvm/system_counter_offset_test.c b/tools/testing/selftests/kvm/system_counter_offset_test.c
index 1c274933912b..7f5b330b6a1b 100644
--- a/tools/testing/selftests/kvm/system_counter_offset_test.c
+++ b/tools/testing/selftests/kvm/system_counter_offset_test.c
@@ -121,7 +121,6 @@ int main(void)
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
check_preconditions(vcpu);
- ucall_init(vm, NULL);
enter_guest(vcpu);
kvm_vm_free(vm);
diff --git a/tools/testing/selftests/kvm/x86_64/amx_test.c b/tools/testing/selftests/kvm/x86_64/amx_test.c
index dadcbad10a1d..21de6ae42086 100644
--- a/tools/testing/selftests/kvm/x86_64/amx_test.c
+++ b/tools/testing/selftests/kvm/x86_64/amx_test.c
@@ -39,11 +39,6 @@
#define XFEATURE_MASK_XTILEDATA (1 << XFEATURE_XTILEDATA)
#define XFEATURE_MASK_XTILE (XFEATURE_MASK_XTILECFG | XFEATURE_MASK_XTILEDATA)
-#define TILE_CPUID 0x1d
-#define XSTATE_CPUID 0xd
-#define TILE_PALETTE_CPUID_SUBLEAVE 0x1
-#define XSTATE_USER_STATE_SUBLEAVE 0x0
-
#define XSAVE_HDR_OFFSET 512
struct xsave_data {
@@ -129,71 +124,26 @@ static bool check_xsave_supports_xtile(void)
return __xgetbv(0) & XFEATURE_MASK_XTILE;
}
-static bool enum_xtile_config(void)
-{
- u32 eax, ebx, ecx, edx;
-
- __cpuid(TILE_CPUID, TILE_PALETTE_CPUID_SUBLEAVE, &eax, &ebx, &ecx, &edx);
- if (!eax || !ebx || !ecx)
- return false;
-
- xtile.max_names = ebx >> 16;
- if (xtile.max_names < NUM_TILES)
- return false;
-
- xtile.bytes_per_tile = eax >> 16;
- if (xtile.bytes_per_tile < TILE_SIZE)
- return false;
-
- xtile.bytes_per_row = ebx;
- xtile.max_rows = ecx;
-
- return true;
-}
-
-static bool enum_xsave_tile(void)
-{
- u32 eax, ebx, ecx, edx;
-
- __cpuid(XSTATE_CPUID, XFEATURE_XTILEDATA, &eax, &ebx, &ecx, &edx);
- if (!eax || !ebx)
- return false;
-
- xtile.xsave_offset = ebx;
- xtile.xsave_size = eax;
-
- return true;
-}
-
-static bool check_xsave_size(void)
-{
- u32 eax, ebx, ecx, edx;
- bool valid = false;
-
- __cpuid(XSTATE_CPUID, XSTATE_USER_STATE_SUBLEAVE, &eax, &ebx, &ecx, &edx);
- if (ebx && ebx <= XSAVE_SIZE)
- valid = true;
-
- return valid;
-}
-
-static bool check_xtile_info(void)
+static void check_xtile_info(void)
{
- bool ret = false;
-
- if (!check_xsave_size())
- return ret;
-
- if (!enum_xsave_tile())
- return ret;
-
- if (!enum_xtile_config())
- return ret;
+ GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_XSTATE_MAX_SIZE_XCR0));
+ GUEST_ASSERT(this_cpu_property(X86_PROPERTY_XSTATE_MAX_SIZE_XCR0) <= XSAVE_SIZE);
- if (sizeof(struct tile_data) >= xtile.xsave_size)
- ret = true;
+ xtile.xsave_offset = this_cpu_property(X86_PROPERTY_XSTATE_TILE_OFFSET);
+ GUEST_ASSERT(xtile.xsave_offset == 2816);
+ xtile.xsave_size = this_cpu_property(X86_PROPERTY_XSTATE_TILE_SIZE);
+ GUEST_ASSERT(xtile.xsave_size == 8192);
+ GUEST_ASSERT(sizeof(struct tile_data) >= xtile.xsave_size);
- return ret;
+ GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_AMX_NR_TILE_REGS));
+ xtile.max_names = this_cpu_property(X86_PROPERTY_AMX_NR_TILE_REGS);
+ GUEST_ASSERT(xtile.max_names == 8);
+ xtile.bytes_per_tile = this_cpu_property(X86_PROPERTY_AMX_BYTES_PER_TILE);
+ GUEST_ASSERT(xtile.bytes_per_tile == 1024);
+ xtile.bytes_per_row = this_cpu_property(X86_PROPERTY_AMX_BYTES_PER_ROW);
+ GUEST_ASSERT(xtile.bytes_per_row == 64);
+ xtile.max_rows = this_cpu_property(X86_PROPERTY_AMX_MAX_ROWS);
+ GUEST_ASSERT(xtile.max_rows == 16);
}
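
[Editor's note] The deleted CPUID plumbing is subsumed by the X86_PROPERTY_* framework: a property identifies a CPUID leaf, subleaf, output register and bit range, and this_cpu_property() (in guest code) or kvm_cpu_property() (host side, against KVM_GET_SUPPORTED_CPUID, as in the hunk below) extracts just that field. A rough sketch of the extraction, reusing the __cpuid() wrapper from the deleted helpers; the struct layout is an assumption about the framework, not a copy of it:

	struct cpuid_property {		/* assumed shape of an X86_PROPERTY_* */
		u32 function;		/* CPUID leaf, e.g. 0x1d */
		u32 index;		/* subleaf */
		int reg;		/* 0=EAX, 1=EBX, 2=ECX, 3=EDX */
		u8 lo_bit, hi_bit;	/* inclusive bit range */
	};

	static u32 read_property(const struct cpuid_property *p)
	{
		u32 regs[4];

		__cpuid(p->function, p->index,
			&regs[0], &regs[1], &regs[2], &regs[3]);
		return (regs[p->reg] >> p->lo_bit) &
		       (BIT_ULL(p->hi_bit - p->lo_bit + 1) - 1);
	}
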
static void set_tilecfg(struct tile_config *cfg)
@@ -238,16 +188,8 @@ static void __attribute__((__flatten__)) guest_code(struct tile_config *amx_cfg,
{
init_regs();
check_cpuid_xsave();
- GUEST_ASSERT(check_xsave_supports_xtile());
- GUEST_ASSERT(check_xtile_info());
-
- /* check xtile configs */
- GUEST_ASSERT(xtile.xsave_offset == 2816);
- GUEST_ASSERT(xtile.xsave_size == 8192);
- GUEST_ASSERT(xtile.max_names == 8);
- GUEST_ASSERT(xtile.bytes_per_tile == 1024);
- GUEST_ASSERT(xtile.bytes_per_row == 64);
- GUEST_ASSERT(xtile.max_rows == 16);
+ check_xsave_supports_xtile();
+ check_xtile_info();
GUEST_SYNC(1);
/* xfd=0, enable amx */
@@ -317,8 +259,9 @@ int main(int argc, char *argv[])
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILECFG));
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILEDATA));
- /* Get xsave/restore max size */
- xsave_restore_size = kvm_get_supported_cpuid_entry(0xd)->ecx;
+ TEST_ASSERT(kvm_cpu_has_p(X86_PROPERTY_XSTATE_MAX_SIZE),
+ "KVM should enumerate max XSAVE size when XSAVE is supported");
+ xsave_restore_size = kvm_cpu_property(X86_PROPERTY_XSTATE_MAX_SIZE);
run = vcpu->run;
vcpu_regs_get(vcpu, &regs1);
diff --git a/tools/testing/selftests/kvm/x86_64/cpuid_test.c b/tools/testing/selftests/kvm/x86_64/cpuid_test.c
index a6aeee2e62e4..2fc3ad9c887e 100644
--- a/tools/testing/selftests/kvm/x86_64/cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cpuid_test.c
@@ -43,15 +43,6 @@ static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid)
}
-static void test_cpuid_40000000(struct kvm_cpuid2 *guest_cpuid)
-{
- u32 eax, ebx, ecx, edx;
-
- cpuid(0x40000000, &eax, &ebx, &ecx, &edx);
-
- GUEST_ASSERT(eax == 0x40000001);
-}
-
static void guest_main(struct kvm_cpuid2 *guest_cpuid)
{
GUEST_SYNC(1);
@@ -60,7 +51,7 @@ static void guest_main(struct kvm_cpuid2 *guest_cpuid)
GUEST_SYNC(2);
- test_cpuid_40000000(guest_cpuid);
+ GUEST_ASSERT(this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF) == 0x40000001);
GUEST_DONE();
}
diff --git a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
index 4208487652f8..1027a671c7d3 100644
--- a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
@@ -57,9 +57,6 @@ int main(int argc, char *argv[])
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE));
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run;
diff --git a/tools/testing/selftests/kvm/x86_64/emulator_error_test.c b/tools/testing/selftests/kvm/x86_64/emulator_error_test.c
deleted file mode 100644
index 236e11755ba6..000000000000
--- a/tools/testing/selftests/kvm/x86_64/emulator_error_test.c
+++ /dev/null
@@ -1,193 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2020, Google LLC.
- *
- * Tests for KVM_CAP_EXIT_ON_EMULATION_FAILURE capability.
- */
-
-#define _GNU_SOURCE /* for program_invocation_short_name */
-
-#include "test_util.h"
-#include "kvm_util.h"
-#include "vmx.h"
-
-#define MAXPHYADDR 36
-
-#define MEM_REGION_GVA 0x0000123456789000
-#define MEM_REGION_GPA 0x0000000700000000
-#define MEM_REGION_SLOT 10
-#define MEM_REGION_SIZE PAGE_SIZE
-
-static void guest_code(void)
-{
- __asm__ __volatile__("flds (%[addr])"
- :: [addr]"r"(MEM_REGION_GVA));
-
- GUEST_DONE();
-}
-
-/*
- * Accessors to get R/M, REG, and Mod bits described in the SDM vol 2,
- * figure 2-2 "Table Interpretation of ModR/M Byte (C8H)".
- */
-#define GET_RM(insn_byte) (insn_byte & 0x7)
-#define GET_REG(insn_byte) ((insn_byte & 0x38) >> 3)
-#define GET_MOD(insn_byte) ((insn_byte & 0xc) >> 6)
-
-/* Ensure we are dealing with a simple 2-byte flds instruction. */
-static bool is_flds(uint8_t *insn_bytes, uint8_t insn_size)
-{
- return insn_size >= 2 &&
- insn_bytes[0] == 0xd9 &&
- GET_REG(insn_bytes[1]) == 0x0 &&
- GET_MOD(insn_bytes[1]) == 0x0 &&
- /* Ensure there is no SIB byte. */
- GET_RM(insn_bytes[1]) != 0x4 &&
- /* Ensure there is no displacement byte. */
- GET_RM(insn_bytes[1]) != 0x5;
-}
-
-static void process_exit_on_emulation_error(struct kvm_vcpu *vcpu)
-{
- struct kvm_run *run = vcpu->run;
- struct kvm_regs regs;
- uint8_t *insn_bytes;
- uint8_t insn_size;
- uint64_t flags;
-
- TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
- "Unexpected exit reason: %u (%s)",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
-
- TEST_ASSERT(run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION,
- "Unexpected suberror: %u",
- run->emulation_failure.suberror);
-
- if (run->emulation_failure.ndata >= 1) {
- flags = run->emulation_failure.flags;
- if ((flags & KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES) &&
- run->emulation_failure.ndata >= 3) {
- insn_size = run->emulation_failure.insn_size;
- insn_bytes = run->emulation_failure.insn_bytes;
-
- TEST_ASSERT(insn_size <= 15 && insn_size > 0,
- "Unexpected instruction size: %u",
- insn_size);
-
- TEST_ASSERT(is_flds(insn_bytes, insn_size),
- "Unexpected instruction. Expected 'flds' (0xd9 /0)");
-
- /*
- * If is_flds() succeeded then the instruction bytes
- * contained an flds instruction that is 2-bytes in
- * length (ie: no prefix, no SIB, no displacement).
- */
- vcpu_regs_get(vcpu, &regs);
- regs.rip += 2;
- vcpu_regs_set(vcpu, &regs);
- }
- }
-}
-
-static void do_guest_assert(struct ucall *uc)
-{
- REPORT_GUEST_ASSERT(*uc);
-}
-
-static void check_for_guest_assert(struct kvm_vcpu *vcpu)
-{
- struct ucall uc;
-
- if (vcpu->run->exit_reason == KVM_EXIT_IO &&
- get_ucall(vcpu, &uc) == UCALL_ABORT) {
- do_guest_assert(&uc);
- }
-}
-
-static void process_ucall_done(struct kvm_vcpu *vcpu)
-{
- struct kvm_run *run = vcpu->run;
- struct ucall uc;
-
- check_for_guest_assert(vcpu);
-
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Unexpected exit reason: %u (%s)",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
-
- TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_DONE,
- "Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
- uc.cmd, UCALL_DONE);
-}
-
-static uint64_t process_ucall(struct kvm_vcpu *vcpu)
-{
- struct kvm_run *run = vcpu->run;
- struct ucall uc;
-
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Unexpected exit reason: %u (%s)",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
-
- switch (get_ucall(vcpu, &uc)) {
- case UCALL_SYNC:
- break;
- case UCALL_ABORT:
- do_guest_assert(&uc);
- break;
- case UCALL_DONE:
- process_ucall_done(vcpu);
- break;
- default:
- TEST_ASSERT(false, "Unexpected ucall");
- }
-
- return uc.cmd;
-}
-
-int main(int argc, char *argv[])
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm;
- uint64_t gpa, pte;
- uint64_t *hva;
- int rc;
-
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
- TEST_REQUIRE(kvm_has_cap(KVM_CAP_SMALLER_MAXPHYADDR));
-
- vm = vm_create_with_one_vcpu(&vcpu, guest_code);
-
- vcpu_set_cpuid_maxphyaddr(vcpu, MAXPHYADDR);
-
- rc = kvm_check_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE);
- TEST_ASSERT(rc, "KVM_CAP_EXIT_ON_EMULATION_FAILURE is unavailable");
- vm_enable_cap(vm, KVM_CAP_EXIT_ON_EMULATION_FAILURE, 1);
-
- vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
- MEM_REGION_GPA, MEM_REGION_SLOT,
- MEM_REGION_SIZE / PAGE_SIZE, 0);
- gpa = vm_phy_pages_alloc(vm, MEM_REGION_SIZE / PAGE_SIZE,
- MEM_REGION_GPA, MEM_REGION_SLOT);
- TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
- virt_map(vm, MEM_REGION_GVA, MEM_REGION_GPA, 1);
- hva = addr_gpa2hva(vm, MEM_REGION_GPA);
- memset(hva, 0, PAGE_SIZE);
- pte = vm_get_page_table_entry(vm, vcpu, MEM_REGION_GVA);
- vm_set_page_table_entry(vm, vcpu, MEM_REGION_GVA, pte | (1ull << 36));
-
- vcpu_run(vcpu);
- process_exit_on_emulation_error(vcpu);
- vcpu_run(vcpu);
-
- TEST_ASSERT(process_ucall(vcpu) == UCALL_DONE, "Expected UCALL_DONE");
-
- kvm_vm_free(vm);
-
- return 0;
-}
diff --git a/tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c b/tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c
new file mode 100644
index 000000000000..37c61f712fd5
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022, Google LLC.
+ *
+ * Test for KVM_CAP_EXIT_ON_EMULATION_FAILURE.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+
+#include "flds_emulation.h"
+
+#include "test_util.h"
+
+#define MMIO_GPA 0x700000000
+#define MMIO_GVA MMIO_GPA
+
+static void guest_code(void)
+{
+ /* Execute flds with an MMIO address to force KVM to emulate it. */
+ flds(MMIO_GVA);
+ GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ /* Tell stdout not to buffer its content */
+ setbuf(stdout, NULL);
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE));
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ vm_enable_cap(vm, KVM_CAP_EXIT_ON_EMULATION_FAILURE, 1);
+ virt_map(vm, MMIO_GVA, MMIO_GPA, 1);
+
+ vcpu_run(vcpu);
+ handle_flds_emulation_failure_exit(vcpu);
+ vcpu_run(vcpu);
+ ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
+
+ kvm_vm_free(vm);
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/flds_emulation.h b/tools/testing/selftests/kvm/x86_64/flds_emulation.h
new file mode 100644
index 000000000000..e43a7df25f2c
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/flds_emulation.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_FLDS_EMULATION_H
+#define SELFTEST_KVM_FLDS_EMULATION_H
+
+#include "kvm_util.h"
+
+#define FLDS_MEM_EAX ".byte 0xd9, 0x00"
+
+/*
+ * flds is an instruction that the KVM instruction emulator is known not to
+ * support. This can be used in guest code along with a mechanism to force
+ * KVM to emulate the instruction (e.g. by providing an MMIO address) to
+ * exercise emulation failures.
+ */
+static inline void flds(uint64_t address)
+{
+ __asm__ __volatile__(FLDS_MEM_EAX :: "a"(address));
+}
+
+static inline void handle_flds_emulation_failure_exit(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ struct kvm_regs regs;
+ uint8_t *insn_bytes;
+ uint64_t flags;
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
+ "Unexpected exit reason: %u (%s)",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ TEST_ASSERT(run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION,
+ "Unexpected suberror: %u",
+ run->emulation_failure.suberror);
+
+ flags = run->emulation_failure.flags;
+ TEST_ASSERT(run->emulation_failure.ndata >= 3 &&
+ flags & KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES,
+ "run->emulation_failure is missing instruction bytes");
+
+ TEST_ASSERT(run->emulation_failure.insn_size >= 2,
+ "Expected a 2-byte opcode for 'flds', got %d bytes",
+ run->emulation_failure.insn_size);
+
+ insn_bytes = run->emulation_failure.insn_bytes;
+ TEST_ASSERT(insn_bytes[0] == 0xd9 && insn_bytes[1] == 0,
+ "Expected 'flds [eax]', opcode '0xd9 0x00', got opcode 0x%02x 0x%02x\n",
+ insn_bytes[0], insn_bytes[1]);
+
+ vcpu_regs_get(vcpu, &regs);
+ regs.rip += 2;
+ vcpu_regs_set(vcpu, &regs);
+}
+
+#endif /* !SELFTEST_KVM_FLDS_EMULATION_H */
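
[Editor's note] For reference, the opcode check above matches 'flds [rax]': opcode 0xd9 with ModR/M byte 0x00, i.e. mod=00 (register-indirect, no displacement), reg=000 (the /0 opcode extension that selects flds m32) and rm=000 ([rax]). The deleted test carried decoding macros for this; note that its GET_MOD masked with 0xc rather than 0xc0 before shifting, so it always evaluated to 0. A corrected sketch:

	/* ModR/M byte layout (SDM Vol. 2): mod[7:6] reg[5:3] rm[2:0]. */
	#define GET_RM(insn_byte)	((insn_byte) & 0x7)
	#define GET_REG(insn_byte)	(((insn_byte) >> 3) & 0x7)
	#define GET_MOD(insn_byte)	(((insn_byte) >> 6) & 0x3)
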
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
index e804eb08dff9..5c27efbf405e 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
@@ -134,9 +134,6 @@ int main(int argc, char *argv[])
const struct kvm_cpuid2 *hv_cpuid_entries;
struct kvm_vcpu *vcpu;
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_CPUID));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c
index 99bc202243d2..ba09d300c953 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c
@@ -16,6 +16,7 @@
#include "kvm_util.h"
+#include "hyperv.h"
#include "vmx.h"
static int ud_count;
@@ -30,24 +31,19 @@ static void guest_nmi_handler(struct ex_regs *regs)
{
}
-/* Exits to L1 destroy GRPs! */
-static inline void rdmsr_fs_base(void)
+static inline void rdmsr_from_l2(uint32_t msr)
{
- __asm__ __volatile__ ("mov $0xc0000100, %%rcx; rdmsr" : : :
- "rax", "rbx", "rcx", "rdx",
- "rsi", "rdi", "r8", "r9", "r10", "r11", "r12",
- "r13", "r14", "r15");
-}
-static inline void rdmsr_gs_base(void)
-{
- __asm__ __volatile__ ("mov $0xc0000101, %%rcx; rdmsr" : : :
- "rax", "rbx", "rcx", "rdx",
- "rsi", "rdi", "r8", "r9", "r10", "r11", "r12",
- "r13", "r14", "r15");
+ /* Currently, L1 doesn't preserve GPRs during vmexits. */
+ __asm__ __volatile__ ("rdmsr" : : "c"(msr) :
+ "rax", "rbx", "rdx", "rsi", "rdi", "r8", "r9",
+ "r10", "r11", "r12", "r13", "r14", "r15");
}
+/* Exit to L1 from L2 with RDMSR instruction */
void l2_guest_code(void)
{
+ u64 unused;
+
GUEST_SYNC(7);
GUEST_SYNC(8);
@@ -58,42 +54,58 @@ void l2_guest_code(void)
vmcall();
/* MSR-Bitmap tests */
- rdmsr_fs_base(); /* intercepted */
- rdmsr_fs_base(); /* intercepted */
- rdmsr_gs_base(); /* not intercepted */
+ rdmsr_from_l2(MSR_FS_BASE); /* intercepted */
+ rdmsr_from_l2(MSR_FS_BASE); /* intercepted */
+ rdmsr_from_l2(MSR_GS_BASE); /* not intercepted */
vmcall();
- rdmsr_gs_base(); /* intercepted */
+ rdmsr_from_l2(MSR_GS_BASE); /* intercepted */
+
+ /* L2 TLB flush tests */
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT, 0x0,
+ HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES | HV_FLUSH_ALL_PROCESSORS);
+ rdmsr_from_l2(MSR_FS_BASE);
+ /*
+ * Note: hypercall status (RAX) is not preserved correctly by L1 after
+ * synthetic vmexit, use unchecked version.
+ */
+ __hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT, 0x0,
+ HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES | HV_FLUSH_ALL_PROCESSORS,
+ &unused);
/* Done, exit to L1 and never come back. */
vmcall();
}
-void guest_code(struct vmx_pages *vmx_pages)
+void guest_code(struct vmx_pages *vmx_pages, struct hyperv_test_pages *hv_pages,
+ vm_vaddr_t hv_hcall_page_gpa)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
+ wrmsr(HV_X64_MSR_HYPERCALL, hv_hcall_page_gpa);
+
x2apic_enable();
GUEST_SYNC(1);
GUEST_SYNC(2);
- enable_vp_assist(vmx_pages->vp_assist_gpa, vmx_pages->vp_assist);
+ enable_vp_assist(hv_pages->vp_assist_gpa, hv_pages->vp_assist);
+ evmcs_enable();
- GUEST_ASSERT(vmx_pages->vmcs_gpa);
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
GUEST_SYNC(3);
- GUEST_ASSERT(load_vmcs(vmx_pages));
- GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
+ GUEST_ASSERT(load_evmcs(hv_pages));
+ GUEST_ASSERT(vmptrstz() == hv_pages->enlightened_vmcs_gpa);
GUEST_SYNC(4);
- GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
+ GUEST_ASSERT(vmptrstz() == hv_pages->enlightened_vmcs_gpa);
prepare_vmcs(vmx_pages, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
GUEST_SYNC(5);
- GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
+ GUEST_ASSERT(vmptrstz() == hv_pages->enlightened_vmcs_gpa);
current_evmcs->revision_id = -1u;
GUEST_ASSERT(vmlaunch());
current_evmcs->revision_id = EVMCS_VERSION;
@@ -102,8 +114,18 @@ void guest_code(struct vmx_pages *vmx_pages)
vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmreadz(PIN_BASED_VM_EXEC_CONTROL) |
PIN_BASED_NMI_EXITING);
+ /* L2 TLB flush setup */
+ current_evmcs->partition_assist_page = hv_pages->partition_assist_gpa;
+ current_evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;
+ current_evmcs->hv_vm_id = 1;
+ current_evmcs->hv_vp_id = 1;
+ current_vp_assist->nested_control.features.directhypercall = 1;
+ *(u32 *)(hv_pages->partition_assist) = 0;
+
GUEST_ASSERT(!vmlaunch());
- GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
+ GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_EXCEPTION_NMI);
+ GUEST_ASSERT_EQ((vmreadz(VM_EXIT_INTR_INFO) & 0xff), NMI_VECTOR);
+ GUEST_ASSERT(vmptrstz() == hv_pages->enlightened_vmcs_gpa);
/*
* NMI forces L2->L1 exit, resuming L2 and hope that EVMCS is
@@ -146,12 +168,24 @@ void guest_code(struct vmx_pages *vmx_pages)
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_MSR_READ);
current_evmcs->guest_rip += 2; /* rdmsr */
+ /*
+ * L2 TLB flush test. First VMCALL should be handled directly by L0,
+ * no VMCALL exit expected.
+ */
+ GUEST_ASSERT(!vmresume());
+ GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_MSR_READ);
+ current_evmcs->guest_rip += 2; /* rdmsr */
+ /* Enable synthetic vmexit */
+ *(u32 *)(hv_pages->partition_assist) = 1;
+ GUEST_ASSERT(!vmresume());
+ GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH);
+
GUEST_ASSERT(!vmresume());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
GUEST_SYNC(11);
/* Try enlightened vmptrld with an incorrect GPA */
- evmcs_vmptrld(0xdeadbeef, vmx_pages->enlightened_vmcs);
+ evmcs_vmptrld(0xdeadbeef, hv_pages->enlightened_vmcs);
GUEST_ASSERT(vmlaunch());
GUEST_ASSERT(ud_count == 1);
GUEST_DONE();
@@ -198,7 +232,8 @@ static struct kvm_vcpu *save_restore_vm(struct kvm_vm *vm,
int main(int argc, char *argv[])
{
- vm_vaddr_t vmx_pages_gva = 0;
+ vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0;
+ vm_vaddr_t hcall_page;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
@@ -212,11 +247,16 @@ int main(int argc, char *argv[])
TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS));
+ hcall_page = vm_vaddr_alloc_pages(vm, 1);
+ memset(addr_gva2hva(vm, hcall_page), 0x0, getpagesize());
+
vcpu_set_hv_cpuid(vcpu);
vcpu_enable_evmcs(vcpu);
vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vcpu, 1, vmx_pages_gva);
+ vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva);
+ vcpu_args_set(vcpu, 3, vmx_pages_gva, hv_pages_gva, addr_gva2gpa(vm, hcall_page));
+ vcpu_set_msr(vcpu, HV_X64_MSR_VP_INDEX, vcpu->id);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
index 05b32e550a80..3163c3e8db0a 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
@@ -13,25 +13,6 @@
#include "processor.h"
#include "hyperv.h"
-#define LINUX_OS_ID ((u64)0x8100 << 48)
-
-static inline uint8_t hypercall(u64 control, vm_vaddr_t input_address,
- vm_vaddr_t output_address, uint64_t *hv_status)
-{
- uint8_t vector;
-
- /* Note both the hypercall and the "asm safe" clobber r9-r11. */
- asm volatile("mov %[output_address], %%r8\n\t"
- KVM_ASM_SAFE("vmcall")
- : "=a" (*hv_status),
- "+c" (control), "+d" (input_address),
- KVM_ASM_SAFE_OUTPUTS(vector)
- : [output_address] "r"(output_address),
- "a" (-EFAULT)
- : "cc", "memory", "r8", KVM_ASM_SAFE_CLOBBERS);
- return vector;
-}
-
struct msr_data {
uint32_t idx;
bool available;
@@ -71,7 +52,7 @@ static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
GUEST_ASSERT(hcall->control);
- wrmsr(HV_X64_MSR_GUEST_OS_ID, LINUX_OS_ID);
+ wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) {
@@ -81,7 +62,7 @@ static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
input = output = 0;
}
- vector = hypercall(hcall->control, input, output, &res);
+ vector = __hyperv_hypercall(hcall->control, input, output, &res);
if (hcall->ud_expected) {
GUEST_ASSERT_2(vector == UD_VECTOR, hcall->control, vector);
} else {
@@ -169,7 +150,7 @@ static void guest_test_msrs_access(void)
*/
msr->idx = HV_X64_MSR_GUEST_OS_ID;
msr->write = 1;
- msr->write_val = LINUX_OS_ID;
+ msr->write_val = HYPERV_LINUX_OS_ID;
msr->available = 1;
break;
case 3:
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c b/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c
new file mode 100644
index 000000000000..8b791eac7d5a
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hyper-V HvCallSendSyntheticClusterIpi{,Ex} tests
+ *
+ * Copyright (C) 2022, Red Hat, Inc.
+ *
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <pthread.h>
+#include <inttypes.h>
+
+#include "kvm_util.h"
+#include "hyperv.h"
+#include "test_util.h"
+#include "vmx.h"
+
+#define RECEIVER_VCPU_ID_1 2
+#define RECEIVER_VCPU_ID_2 65
+
+#define IPI_VECTOR 0xfe
+
+static volatile uint64_t ipis_rcvd[RECEIVER_VCPU_ID_2 + 1];
+
+struct hv_vpset {
+ u64 format;
+ u64 valid_bank_mask;
+ u64 bank_contents[2];
+};
+
+enum HV_GENERIC_SET_FORMAT {
+ HV_GENERIC_SET_SPARSE_4K,
+ HV_GENERIC_SET_ALL,
+};
+
+/* HvCallSendSyntheticClusterIpi hypercall */
+struct hv_send_ipi {
+ u32 vector;
+ u32 reserved;
+ u64 cpu_mask;
+};
+
+/* HvCallSendSyntheticClusterIpiEx hypercall */
+struct hv_send_ipi_ex {
+ u32 vector;
+ u32 reserved;
+ struct hv_vpset vp_set;
+};
+
+static inline void hv_init(vm_vaddr_t pgs_gpa)
+{
+ wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
+ wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
+}
+
+static void receiver_code(void *hcall_page, vm_vaddr_t pgs_gpa)
+{
+ u32 vcpu_id;
+
+ x2apic_enable();
+ hv_init(pgs_gpa);
+
+ vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);
+
+ /* Signal sender vCPU we're ready */
+ ipis_rcvd[vcpu_id] = (u64)-1;
+
+ for (;;)
+ asm volatile("sti; hlt; cli");
+}
+
+static void guest_ipi_handler(struct ex_regs *regs)
+{
+ u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);
+
+ ipis_rcvd[vcpu_id]++;
+ wrmsr(HV_X64_MSR_EOI, 1);
+}
+
+static inline void nop_loop(void)
+{
+ int i;
+
+ for (i = 0; i < 100000000; i++)
+ asm volatile("nop");
+}
+
+static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa)
+{
+ struct hv_send_ipi *ipi = (struct hv_send_ipi *)hcall_page;
+ struct hv_send_ipi_ex *ipi_ex = (struct hv_send_ipi_ex *)hcall_page;
+ int stage = 1, ipis_expected[2] = {0};
+
+ hv_init(pgs_gpa);
+ GUEST_SYNC(stage++);
+
+ /* Wait for receiver vCPUs to come up */
+ while (!ipis_rcvd[RECEIVER_VCPU_ID_1] || !ipis_rcvd[RECEIVER_VCPU_ID_2])
+ nop_loop();
+ ipis_rcvd[RECEIVER_VCPU_ID_1] = ipis_rcvd[RECEIVER_VCPU_ID_2] = 0;
+
+ /* 'Slow' HvCallSendSyntheticClusterIpi to RECEIVER_VCPU_ID_1 */
+ ipi->vector = IPI_VECTOR;
+ ipi->cpu_mask = 1 << RECEIVER_VCPU_ID_1;
+ hyperv_hypercall(HVCALL_SEND_IPI, pgs_gpa, pgs_gpa + 4096);
+ nop_loop();
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
+ GUEST_SYNC(stage++);
+ /* 'Fast' HvCallSendSyntheticClusterIpi to RECEIVER_VCPU_ID_1 */
+ hyperv_hypercall(HVCALL_SEND_IPI | HV_HYPERCALL_FAST_BIT,
+ IPI_VECTOR, 1 << RECEIVER_VCPU_ID_1);
+ nop_loop();
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
+ GUEST_SYNC(stage++);
+
+ /* 'Slow' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_1 */
+ memset(hcall_page, 0, 4096);
+ ipi_ex->vector = IPI_VECTOR;
+ ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+ ipi_ex->vp_set.valid_bank_mask = 1 << 0;
+ ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_1);
+ hyperv_hypercall(HVCALL_SEND_IPI_EX | (1 << HV_HYPERCALL_VARHEAD_OFFSET),
+ pgs_gpa, pgs_gpa + 4096);
+ nop_loop();
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
+ GUEST_SYNC(stage++);
+ /* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_1 */
+ hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 1);
+ hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT |
+ (1 << HV_HYPERCALL_VARHEAD_OFFSET),
+ IPI_VECTOR, HV_GENERIC_SET_SPARSE_4K);
+ nop_loop();
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
+ GUEST_SYNC(stage++);
+
+ /* 'Slow' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_2 */
+ memset(hcall_page, 0, 4096);
+ ipi_ex->vector = IPI_VECTOR;
+ ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+ ipi_ex->vp_set.valid_bank_mask = 1 << 1;
+ ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_2 - 64);
+ hyperv_hypercall(HVCALL_SEND_IPI_EX | (1 << HV_HYPERCALL_VARHEAD_OFFSET),
+ pgs_gpa, pgs_gpa + 4096);
+ nop_loop();
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ipis_expected[0]);
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
+ GUEST_SYNC(stage++);
+ /* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_2 */
+ hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 1);
+ hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT |
+ (1 << HV_HYPERCALL_VARHEAD_OFFSET),
+ IPI_VECTOR, HV_GENERIC_SET_SPARSE_4K);
+ nop_loop();
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ipis_expected[0]);
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
+ GUEST_SYNC(stage++);
+
+ /* 'Slow' HvCallSendSyntheticClusterIpiEx to both RECEIVER_VCPU_ID_{1,2} */
+ memset(hcall_page, 0, 4096);
+ ipi_ex->vector = IPI_VECTOR;
+ ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+ ipi_ex->vp_set.valid_bank_mask = 1 << 1 | 1;
+ ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_1);
+ ipi_ex->vp_set.bank_contents[1] = BIT(RECEIVER_VCPU_ID_2 - 64);
+ hyperv_hypercall(HVCALL_SEND_IPI_EX | (2 << HV_HYPERCALL_VARHEAD_OFFSET),
+ pgs_gpa, pgs_gpa + 4096);
+ nop_loop();
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
+ GUEST_SYNC(stage++);
+	/* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to both RECEIVER_VCPU_ID_{1,2} */
+ hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 2);
+ hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT |
+ (2 << HV_HYPERCALL_VARHEAD_OFFSET),
+ IPI_VECTOR, HV_GENERIC_SET_SPARSE_4K);
+ nop_loop();
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
+ GUEST_SYNC(stage++);
+
+ /* 'Slow' HvCallSendSyntheticClusterIpiEx to HV_GENERIC_SET_ALL */
+ memset(hcall_page, 0, 4096);
+ ipi_ex->vector = IPI_VECTOR;
+ ipi_ex->vp_set.format = HV_GENERIC_SET_ALL;
+ hyperv_hypercall(HVCALL_SEND_IPI_EX, pgs_gpa, pgs_gpa + 4096);
+ nop_loop();
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
+ GUEST_SYNC(stage++);
+ /*
+ * 'XMM Fast' HvCallSendSyntheticClusterIpiEx to HV_GENERIC_SET_ALL.
+	 * Nothing to write to XMM regs.
+ */
+ hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT,
+ IPI_VECTOR, HV_GENERIC_SET_ALL);
+ nop_loop();
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
+ GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
+ GUEST_SYNC(stage++);
+
+ GUEST_DONE();
+}
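
[Editor's note] The HV_GENERIC_SET_SPARSE_4K encoding used throughout the sender is compact: valid_bank_mask has one bit per 64-vCPU 'bank', and bank_contents[] carries one u64 per *set* bank in ascending bank order. That is why vCPU 65 (RECEIVER_VCPU_ID_2) sits in bank_contents[0] when bank 1 is the only valid bank, but in bank_contents[1] when banks 0 and 1 are both valid. A helper sketch, not part of the test, assuming VPs are marked in ascending bank order so existing slots never shift:

	static void vpset_mark_vp(struct hv_vpset *vp_set, u32 vp_index)
	{
		u64 bank = vp_index / 64;
		int slot;

		vp_set->valid_bank_mask |= BIT_ULL(bank);
		/* One u64 per set bank: the slot index is the number of
		 * lower-numbered banks already set. */
		slot = __builtin_popcountll(vp_set->valid_bank_mask &
					    (BIT_ULL(bank) - 1));
		vp_set->bank_contents[slot] |= BIT_ULL(vp_index % 64);
	}
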
+
+static void *vcpu_thread(void *arg)
+{
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *)arg;
+ int old, r;
+
+ r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
+ TEST_ASSERT(!r, "pthread_setcanceltype failed on vcpu_id=%u with errno=%d",
+ vcpu->id, r);
+
+ vcpu_run(vcpu);
+
+ TEST_FAIL("vCPU %u exited unexpectedly", vcpu->id);
+
+ return NULL;
+}
+
+static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu)
+{
+ void *retval;
+ int r;
+
+ r = pthread_cancel(thread);
+ TEST_ASSERT(!r, "pthread_cancel on vcpu_id=%d failed with errno=%d",
+ vcpu->id, r);
+
+ r = pthread_join(thread, &retval);
+ TEST_ASSERT(!r, "pthread_join on vcpu_id=%d failed with errno=%d",
+ vcpu->id, r);
+ TEST_ASSERT(retval == PTHREAD_CANCELED,
+ "expected retval=%p, got %p", PTHREAD_CANCELED,
+ retval);
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu[3];
+ unsigned int exit_reason;
+ vm_vaddr_t hcall_page;
+ pthread_t threads[2];
+ int stage = 1, r;
+ struct ucall uc;
+
+ vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code);
+
+ /* Hypercall input/output */
+ hcall_page = vm_vaddr_alloc_pages(vm, 2);
+ memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
+
+ vm_init_descriptor_tables(vm);
+
+ vcpu[1] = vm_vcpu_add(vm, RECEIVER_VCPU_ID_1, receiver_code);
+ vcpu_init_descriptor_tables(vcpu[1]);
+ vcpu_args_set(vcpu[1], 2, hcall_page, addr_gva2gpa(vm, hcall_page));
+ vcpu_set_msr(vcpu[1], HV_X64_MSR_VP_INDEX, RECEIVER_VCPU_ID_1);
+ vcpu_set_hv_cpuid(vcpu[1]);
+
+ vcpu[2] = vm_vcpu_add(vm, RECEIVER_VCPU_ID_2, receiver_code);
+ vcpu_init_descriptor_tables(vcpu[2]);
+ vcpu_args_set(vcpu[2], 2, hcall_page, addr_gva2gpa(vm, hcall_page));
+ vcpu_set_msr(vcpu[2], HV_X64_MSR_VP_INDEX, RECEIVER_VCPU_ID_2);
+ vcpu_set_hv_cpuid(vcpu[2]);
+
+ vm_install_exception_handler(vm, IPI_VECTOR, guest_ipi_handler);
+
+ vcpu_args_set(vcpu[0], 2, hcall_page, addr_gva2gpa(vm, hcall_page));
+ vcpu_set_hv_cpuid(vcpu[0]);
+
+ r = pthread_create(&threads[0], NULL, vcpu_thread, vcpu[1]);
+ TEST_ASSERT(!r, "pthread_create failed errno=%d", r);
+
+ r = pthread_create(&threads[1], NULL, vcpu_thread, vcpu[2]);
+	TEST_ASSERT(!r, "pthread_create failed errno=%d", r);
+
+ while (true) {
+ vcpu_run(vcpu[0]);
+
+ exit_reason = vcpu[0]->run->exit_reason;
+ TEST_ASSERT(exit_reason == KVM_EXIT_IO,
+ "unexpected exit reason: %u (%s)",
+ exit_reason, exit_reason_str(exit_reason));
+
+ switch (get_ucall(vcpu[0], &uc)) {
+ case UCALL_SYNC:
+ TEST_ASSERT(uc.args[1] == stage,
+ "Unexpected stage: %ld (%d expected)\n",
+ uc.args[1], stage);
+ break;
+ case UCALL_DONE:
+ goto done;
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ /* NOT REACHED */
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+
+ stage++;
+ }
+
+done:
+ cancel_join_vcpu_thread(threads[0], vcpu[1]);
+ cancel_join_vcpu_thread(threads[1], vcpu[2]);
+ kvm_vm_free(vm);
+
+ return r;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
index a380ad7bb9b3..3b3cc94ba8e4 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
@@ -23,59 +23,78 @@
#define L2_GUEST_STACK_SIZE 256
-struct hv_enlightenments {
- struct __packed hv_enlightenments_control {
- u32 nested_flush_hypercall:1;
- u32 msr_bitmap:1;
- u32 enlightened_npt_tlb: 1;
- u32 reserved:29;
- } __packed hv_enlightenments_control;
- u32 hv_vp_id;
- u64 hv_vm_id;
- u64 partition_assist_page;
- u64 reserved;
-} __packed;
-
-/*
- * Hyper-V uses the software reserved clean bit in VMCB
- */
-#define VMCB_HV_NESTED_ENLIGHTENMENTS (1U << 31)
+/* Exit to L1 from L2 with RDMSR instruction */
+static inline void rdmsr_from_l2(uint32_t msr)
+{
+ /* Currently, L1 doesn't preserve GPRs during vmexits. */
+ __asm__ __volatile__ ("rdmsr" : : "c"(msr) :
+ "rax", "rbx", "rdx", "rsi", "rdi", "r8", "r9",
+ "r10", "r11", "r12", "r13", "r14", "r15");
+}
void l2_guest_code(void)
{
+ u64 unused;
+
GUEST_SYNC(3);
/* Exit to L1 */
vmmcall();
/* MSR-Bitmap tests */
- rdmsr(MSR_FS_BASE); /* intercepted */
- rdmsr(MSR_FS_BASE); /* intercepted */
- rdmsr(MSR_GS_BASE); /* not intercepted */
+ rdmsr_from_l2(MSR_FS_BASE); /* intercepted */
+ rdmsr_from_l2(MSR_FS_BASE); /* intercepted */
+ rdmsr_from_l2(MSR_GS_BASE); /* not intercepted */
vmmcall();
- rdmsr(MSR_GS_BASE); /* intercepted */
+ rdmsr_from_l2(MSR_GS_BASE); /* intercepted */
GUEST_SYNC(5);
+ /* L2 TLB flush tests */
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
+ HV_HYPERCALL_FAST_BIT, 0x0,
+ HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
+ HV_FLUSH_ALL_PROCESSORS);
+ rdmsr_from_l2(MSR_FS_BASE);
+ /*
+ * Note: hypercall status (RAX) is not preserved correctly by L1 after
+ * synthetic vmexit, use unchecked version.
+ */
+ __hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
+ HV_HYPERCALL_FAST_BIT, 0x0,
+ HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
+ HV_FLUSH_ALL_PROCESSORS, &unused);
+
/* Done, exit to L1 and never come back. */
vmmcall();
}
-static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm)
+static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm,
+ struct hyperv_test_pages *hv_pages,
+ vm_vaddr_t pgs_gpa)
{
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
struct vmcb *vmcb = svm->vmcb;
- struct hv_enlightenments *hve =
- (struct hv_enlightenments *)vmcb->control.reserved_sw;
+ struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments;
GUEST_SYNC(1);
- wrmsr(HV_X64_MSR_GUEST_OS_ID, (u64)0x8100 << 48);
+ wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
+ wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
+ enable_vp_assist(hv_pages->vp_assist_gpa, hv_pages->vp_assist);
GUEST_ASSERT(svm->vmcb_gpa);
/* Prepare for L2 execution. */
generic_svm_setup(svm, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
+ /* L2 TLB flush setup */
+ hve->partition_assist_page = hv_pages->partition_assist_gpa;
+ hve->hv_enlightenments_control.nested_flush_hypercall = 1;
+ hve->hv_vm_id = 1;
+ hve->hv_vp_id = 1;
+ current_vp_assist->nested_control.features.directhypercall = 1;
+ *(u32 *)(hv_pages->partition_assist) = 0;
+
GUEST_SYNC(2);
run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
@@ -98,18 +117,32 @@ static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm)
/* Intercept RDMSR 0xc0000101 without telling KVM about it */
set_bit(2 * (MSR_GS_BASE & 0x1fff), svm->msr + 0x800);
/* Make sure HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP is set */
- vmcb->control.clean |= VMCB_HV_NESTED_ENLIGHTENMENTS;
+ vmcb->control.clean |= HV_VMCB_NESTED_ENLIGHTENMENTS;
run_guest(vmcb, svm->vmcb_gpa);
/* Make sure we don't see SVM_EXIT_MSR here so eMSR bitmap works */
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
vmcb->save.rip += 3; /* vmcall */
/* Now tell KVM we've changed MSR-Bitmap */
- vmcb->control.clean &= ~VMCB_HV_NESTED_ENLIGHTENMENTS;
+ vmcb->control.clean &= ~HV_VMCB_NESTED_ENLIGHTENMENTS;
run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
vmcb->save.rip += 2; /* rdmsr */
+
+ /*
+ * L2 TLB flush test. First VMCALL should be handled directly by L0,
+ * no VMCALL exit expected.
+ */
+ run_guest(vmcb, svm->vmcb_gpa);
+ GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
+ vmcb->save.rip += 2; /* rdmsr */
+ /* Enable synthetic vmexit */
+ *(u32 *)(hv_pages->partition_assist) = 1;
+ run_guest(vmcb, svm->vmcb_gpa);
+ GUEST_ASSERT(vmcb->control.exit_code == HV_SVM_EXITCODE_ENL);
+ GUEST_ASSERT(vmcb->control.exit_info_1 == HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH);
+
run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
GUEST_SYNC(6);
@@ -119,8 +152,8 @@ static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm)
int main(int argc, char *argv[])
{
- vm_vaddr_t nested_gva = 0;
-
+ vm_vaddr_t nested_gva = 0, hv_pages_gva = 0;
+ vm_vaddr_t hcall_page;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
@@ -134,7 +167,13 @@ int main(int argc, char *argv[])
vcpu_set_hv_cpuid(vcpu);
run = vcpu->run;
vcpu_alloc_svm(vm, &nested_gva);
- vcpu_args_set(vcpu, 1, nested_gva);
+ vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva);
+
+ hcall_page = vm_vaddr_alloc_pages(vm, 1);
+ memset(addr_gva2hva(vm, hcall_page), 0x0, getpagesize());
+
+ vcpu_args_set(vcpu, 3, nested_gva, hv_pages_gva, addr_gva2gpa(vm, hcall_page));
+ vcpu_set_msr(vcpu, HV_X64_MSR_VP_INDEX, vcpu->id);
for (stage = 1;; stage++) {
vcpu_run(vcpu);
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c b/tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c
new file mode 100644
index 000000000000..68f97ff720a7
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c
@@ -0,0 +1,690 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hyper-V HvFlushVirtualAddress{List,Space}{,Ex} tests
+ *
+ * Copyright (C) 2022, Red Hat, Inc.
+ *
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <asm/barrier.h>
+#include <pthread.h>
+#include <inttypes.h>
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "hyperv.h"
+#include "test_util.h"
+#include "vmx.h"
+
+#define WORKER_VCPU_ID_1 2
+#define WORKER_VCPU_ID_2 65
+
+#define NTRY 100
+#define NTEST_PAGES 2
+
+struct hv_vpset {
+ u64 format;
+ u64 valid_bank_mask;
+ u64 bank_contents[];
+};
+
+enum HV_GENERIC_SET_FORMAT {
+ HV_GENERIC_SET_SPARSE_4K,
+ HV_GENERIC_SET_ALL,
+};
+
+#define HV_FLUSH_ALL_PROCESSORS BIT(0)
+#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
+#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
+#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
+
+/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
+struct hv_tlb_flush {
+ u64 address_space;
+ u64 flags;
+ u64 processor_mask;
+ u64 gva_list[];
+} __packed;
+
+/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
+struct hv_tlb_flush_ex {
+ u64 address_space;
+ u64 flags;
+ struct hv_vpset hv_vp_set;
+ u64 gva_list[];
+} __packed;
+
+/*
+ * Pass the following info to 'workers' and 'sender'
+ * - Hypercall page's GVA
+ * - Hypercall page's GPA
+ * - Test pages GVA
+ * - GVAs of the test pages' PTEs
+ */
+struct test_data {
+ vm_vaddr_t hcall_gva;
+ vm_paddr_t hcall_gpa;
+ vm_vaddr_t test_pages;
+ vm_vaddr_t test_pages_pte[NTEST_PAGES];
+};
+
+/* 'Worker' vCPU code checking the contents of the test page */
+static void worker_guest_code(vm_vaddr_t test_data)
+{
+ struct test_data *data = (struct test_data *)test_data;
+ u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);
+ void *exp_page = (void *)data->test_pages + PAGE_SIZE * NTEST_PAGES;
+ u64 *this_cpu = (u64 *)(exp_page + vcpu_id * sizeof(u64));
+ u64 expected, val;
+
+ x2apic_enable();
+ wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
+
+ for (;;) {
+ cpu_relax();
+
+ expected = READ_ONCE(*this_cpu);
+
+ /*
+ * Make sure the value in the test page is read after reading
+ * the expectation for the first time. Pairs with wmb() in
+ * prepare_to_test().
+ */
+ rmb();
+
+ val = READ_ONCE(*(u64 *)data->test_pages);
+
+ /*
+		 * Make sure the value in the test page is read before reading
+		 * the expectation for the second time. Pairs with wmb() in
+		 * post_test().
+ */
+ rmb();
+
+ /*
+ * '0' indicates the sender is between iterations, wait until
+ * the sender is ready for this vCPU to start checking again.
+ */
+ if (!expected)
+ continue;
+
+ /*
+ * Re-read the per-vCPU byte to ensure the sender didn't move
+ * onto a new iteration.
+ */
+ if (expected != READ_ONCE(*this_cpu))
+ continue;
+
+ GUEST_ASSERT(val == expected);
+ }
+}
+
+/*
+ * Write per-CPU info indicating what each 'worker' CPU is supposed to see in
+ * test page. '0' means don't check.
+ */
+static void set_expected_val(void *addr, u64 val, int vcpu_id)
+{
+ void *exp_page = addr + PAGE_SIZE * NTEST_PAGES;
+
+ *(u64 *)(exp_page + vcpu_id * sizeof(u64)) = val;
+}
+
+/*
+ * Update PTEs swapping two test pages.
+ * TODO: use swap()/xchg() when these are provided.
+ */
+static void swap_two_test_pages(vm_paddr_t pte_gva1, vm_paddr_t pte_gva2)
+{
+ uint64_t tmp = *(uint64_t *)pte_gva1;
+
+ *(uint64_t *)pte_gva1 = *(uint64_t *)pte_gva2;
+ *(uint64_t *)pte_gva2 = tmp;
+}
+
+/*
+ * TODO: replace the silly NOP loop with a proper udelay() implementation.
+ */
+static inline void do_delay(void)
+{
+ int i;
+
+ for (i = 0; i < 1000000; i++)
+ asm volatile("nop");
+}
+
+/*
+ * Prepare to test: 'disable' workers by setting the expectation to '0',
+ * clear hypercall input page and then swap two test pages.
+ */
+static inline void prepare_to_test(struct test_data *data)
+{
+ /* Clear hypercall input page */
+ memset((void *)data->hcall_gva, 0, PAGE_SIZE);
+
+ /* 'Disable' workers */
+ set_expected_val((void *)data->test_pages, 0x0, WORKER_VCPU_ID_1);
+ set_expected_val((void *)data->test_pages, 0x0, WORKER_VCPU_ID_2);
+
+ /* Make sure workers are 'disabled' before we swap PTEs. */
+ wmb();
+
+ /* Make sure workers have enough time to notice */
+ do_delay();
+
+ /* Swap test page mappings */
+ swap_two_test_pages(data->test_pages_pte[0], data->test_pages_pte[1]);
+}
+
+/*
+ * Finalize the test: check the hypercall result, set the expected val for
+ * 'worker' CPUs and give them some time to test.
+ */
+static inline void post_test(struct test_data *data, u64 exp1, u64 exp2)
+{
+ /* Make sure we change the expectation after swapping PTEs */
+ wmb();
+
+ /* Set the expectation for workers, '0' means don't test */
+ set_expected_val((void *)data->test_pages, exp1, WORKER_VCPU_ID_1);
+ set_expected_val((void *)data->test_pages, exp2, WORKER_VCPU_ID_2);
+
+ /* Make sure workers have enough time to test */
+ do_delay();
+}
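
[Editor's note] The wmb()/rmb() calls in prepare_to_test()/post_test() and worker_guest_code() pair up as the classic message-passing pattern: the sender publishes the data (the swapped PTEs) strictly before the flag (the per-vCPU expectation), and the workers read the flag strictly before the data. A generic sketch of the pairing, assuming the usual READ_ONCE()/WRITE_ONCE() helpers are available; illustrative only, not part of the test:

	static u64 payload;
	static u64 flag;

	static void producer(u64 val)
	{
		WRITE_ONCE(payload, val);
		wmb();	/* publish the payload before the flag */
		WRITE_ONCE(flag, 1);
	}

	static void consumer(u64 expected)
	{
		while (!READ_ONCE(flag))
			cpu_relax();
		rmb();	/* read the flag before the payload */
		GUEST_ASSERT(READ_ONCE(payload) == expected);
	}
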
+
+#define TESTVAL1 0x0101010101010101
+#define TESTVAL2 0x0202020202020202
+
+/* Main vCPU doing the test */
+static void sender_guest_code(vm_vaddr_t test_data)
+{
+ struct test_data *data = (struct test_data *)test_data;
+ struct hv_tlb_flush *flush = (struct hv_tlb_flush *)data->hcall_gva;
+ struct hv_tlb_flush_ex *flush_ex = (struct hv_tlb_flush_ex *)data->hcall_gva;
+ vm_paddr_t hcall_gpa = data->hcall_gpa;
+ int i, stage = 1;
+
+ wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
+ wrmsr(HV_X64_MSR_HYPERCALL, data->hcall_gpa);
+
+ /* "Slow" hypercalls */
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for WORKER_VCPU_ID_1 */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
+ flush->processor_mask = BIT(WORKER_VCPU_ID_1);
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE, hcall_gpa,
+ hcall_gpa + PAGE_SIZE);
+ post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
+ }
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for WORKER_VCPU_ID_1 */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
+ flush->processor_mask = BIT(WORKER_VCPU_ID_1);
+ flush->gva_list[0] = (u64)data->test_pages;
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
+ (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
+ hcall_gpa, hcall_gpa + PAGE_SIZE);
+ post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
+ }
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for HV_FLUSH_ALL_PROCESSORS */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
+ HV_FLUSH_ALL_PROCESSORS;
+ flush->processor_mask = 0;
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE, hcall_gpa,
+ hcall_gpa + PAGE_SIZE);
+ post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, i % 2 ? TESTVAL1 : TESTVAL2);
+ }
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for HV_FLUSH_ALL_PROCESSORS */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
+ HV_FLUSH_ALL_PROCESSORS;
+ flush->gva_list[0] = (u64)data->test_pages;
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
+ (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
+ hcall_gpa, hcall_gpa + PAGE_SIZE);
+ post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
+ i % 2 ? TESTVAL1 : TESTVAL2);
+ }
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for WORKER_VCPU_ID_2 */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
+ flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+ flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
+ flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
+ (1 << HV_HYPERCALL_VARHEAD_OFFSET),
+ hcall_gpa, hcall_gpa + PAGE_SIZE);
+ post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
+ }
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for WORKER_VCPU_ID_2 */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
+ flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+ flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
+ flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
+ /* bank_contents and gva_list occupy the same space, thus [1] */
+ flush_ex->gva_list[1] = (u64)data->test_pages;
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
+ (1 << HV_HYPERCALL_VARHEAD_OFFSET) |
+ (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
+ hcall_gpa, hcall_gpa + PAGE_SIZE);
+ post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
+ }
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for both vCPUs */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
+ flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+ flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64) |
+ BIT_ULL(WORKER_VCPU_ID_1 / 64);
+ flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
+ flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
+ (2 << HV_HYPERCALL_VARHEAD_OFFSET),
+ hcall_gpa, hcall_gpa + PAGE_SIZE);
+ post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
+ i % 2 ? TESTVAL1 : TESTVAL2);
+ }
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for both vCPUs */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
+ flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+ flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_1 / 64) |
+ BIT_ULL(WORKER_VCPU_ID_2 / 64);
+ flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
+ flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
+ /* bank_contents and gva_list occupy the same space, thus [2] */
+ flush_ex->gva_list[2] = (u64)data->test_pages;
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
+ (2 << HV_HYPERCALL_VARHEAD_OFFSET) |
+ (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
+ hcall_gpa, hcall_gpa + PAGE_SIZE);
+ post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
+ i % 2 ? TESTVAL1 : TESTVAL2);
+ }
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for HV_GENERIC_SET_ALL */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
+ flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
+ hcall_gpa, hcall_gpa + PAGE_SIZE);
+ post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
+ i % 2 ? TESTVAL1 : TESTVAL2);
+ }
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for HV_GENERIC_SET_ALL */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
+ flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
+ flush_ex->gva_list[0] = (u64)data->test_pages;
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
+ (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
+ hcall_gpa, hcall_gpa + PAGE_SIZE);
+ post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
+ i % 2 ? TESTVAL1 : TESTVAL2);
+ }
+
+ /* "Fast" hypercalls */
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for WORKER_VCPU_ID_1 */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush->processor_mask = BIT(WORKER_VCPU_ID_1);
+ hyperv_write_xmm_input(&flush->processor_mask, 1);
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
+ HV_HYPERCALL_FAST_BIT, 0x0,
+ HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
+ post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
+ }
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for WORKER_VCPU_ID_1 */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush->processor_mask = BIT(WORKER_VCPU_ID_1);
+ flush->gva_list[0] = (u64)data->test_pages;
+ hyperv_write_xmm_input(&flush->processor_mask, 1);
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
+ HV_HYPERCALL_FAST_BIT |
+ (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
+ 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
+ post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
+ }
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for HV_FLUSH_ALL_PROCESSORS */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ hyperv_write_xmm_input(&flush->processor_mask, 1);
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
+ HV_HYPERCALL_FAST_BIT, 0x0,
+ HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
+ HV_FLUSH_ALL_PROCESSORS);
+ post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
+ i % 2 ? TESTVAL1 : TESTVAL2);
+ }
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for HV_FLUSH_ALL_PROCESSORS */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush->gva_list[0] = (u64)data->test_pages;
+ hyperv_write_xmm_input(&flush->processor_mask, 1);
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
+ HV_HYPERCALL_FAST_BIT |
+ (1UL << HV_HYPERCALL_REP_COMP_OFFSET), 0x0,
+ HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
+ HV_FLUSH_ALL_PROCESSORS);
+ post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
+ i % 2 ? TESTVAL1 : TESTVAL2);
+ }
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for WORKER_VCPU_ID_2 */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+ flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
+ flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
+ hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
+ HV_HYPERCALL_FAST_BIT |
+ (1 << HV_HYPERCALL_VARHEAD_OFFSET),
+ 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
+ post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
+ }
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for WORKER_VCPU_ID_2 */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+ flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
+ flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
+ /* bank_contents and gva_list occupy the same space, thus [1] */
+ flush_ex->gva_list[1] = (u64)data->test_pages;
+ hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
+ HV_HYPERCALL_FAST_BIT |
+ (1 << HV_HYPERCALL_VARHEAD_OFFSET) |
+ (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
+ 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
+ post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
+ }
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for both vCPUs */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+ flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64) |
+ BIT_ULL(WORKER_VCPU_ID_1 / 64);
+ flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
+ flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
+ hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
+ HV_HYPERCALL_FAST_BIT |
+ (2 << HV_HYPERCALL_VARHEAD_OFFSET),
+ 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
+		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
+			  i % 2 ? TESTVAL1 : TESTVAL2);
+ }
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for both vCPUs */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+ flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_1 / 64) |
+ BIT_ULL(WORKER_VCPU_ID_2 / 64);
+ flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
+ flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
+ /* bank_contents and gva_list occupy the same space, thus [2] */
+ flush_ex->gva_list[2] = (u64)data->test_pages;
+ hyperv_write_xmm_input(&flush_ex->hv_vp_set, 3);
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
+ HV_HYPERCALL_FAST_BIT |
+ (2 << HV_HYPERCALL_VARHEAD_OFFSET) |
+ (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
+ 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
+ post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
+ i % 2 ? TESTVAL1 : TESTVAL2);
+ }
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for HV_GENERIC_SET_ALL */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
+ flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
+ hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
+ HV_HYPERCALL_FAST_BIT,
+ 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
+ post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
+ i % 2 ? TESTVAL1 : TESTVAL2);
+ }
+
+ GUEST_SYNC(stage++);
+
+ /* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for HV_GENERIC_SET_ALL */
+ for (i = 0; i < NTRY; i++) {
+ prepare_to_test(data);
+ flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
+ flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
+ flush_ex->gva_list[0] = (u64)data->test_pages;
+ hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
+ HV_HYPERCALL_FAST_BIT |
+ (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
+ 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
+ post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
+ i % 2 ? TESTVAL1 : TESTVAL2);
+ }
+
+ GUEST_DONE();
+}
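
[Editor's note] The hypercall control words built above pack fields of the TLFS 'hypercall input value': the call code in bits 15:0, the fast-I/O flag (HV_HYPERCALL_FAST_BIT), the variable header size in 8-byte units at HV_HYPERCALL_VARHEAD_OFFSET, and the rep count at HV_HYPERCALL_REP_COMP_OFFSET. A small builder sketch using only the constants already named here; the field widths are per the TLFS and the helper itself is illustrative, not part of the test:

	static inline u64 hv_build_control(u16 code, bool fast,
					   u64 varhead_qwords, u64 rep_count)
	{
		u64 control = code;

		if (fast)
			control |= HV_HYPERCALL_FAST_BIT;
		control |= varhead_qwords << HV_HYPERCALL_VARHEAD_OFFSET;
		control |= rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
		return control;
	}

	/* e.g. the two-bank rep-list flush above:
	 * hv_build_control(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX, false, 2, 1) */
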
+
+static void *vcpu_thread(void *arg)
+{
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *)arg;
+ struct ucall uc;
+ int old;
+ int r;
+ unsigned int exit_reason;
+
+ r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
+ TEST_ASSERT(!r, "pthread_setcanceltype failed on vcpu_id=%u with errno=%d",
+ vcpu->id, r);
+
+ vcpu_run(vcpu);
+ exit_reason = vcpu->run->exit_reason;
+
+ TEST_ASSERT(exit_reason == KVM_EXIT_IO,
+ "vCPU %u exited with unexpected exit reason %u-%s, expected KVM_EXIT_IO",
+ vcpu->id, exit_reason, exit_reason_str(exit_reason));
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ /* NOT REACHED */
+ default:
+ TEST_FAIL("Unexpected ucall %lu, vCPU %d", uc.cmd, vcpu->id);
+ }
+
+ return NULL;
+}
+
+static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu)
+{
+ void *retval;
+ int r;
+
+ r = pthread_cancel(thread);
+ TEST_ASSERT(!r, "pthread_cancel on vcpu_id=%d failed with errno=%d",
+ vcpu->id, r);
+
+ r = pthread_join(thread, &retval);
+ TEST_ASSERT(!r, "pthread_join on vcpu_id=%d failed with errno=%d",
+ vcpu->id, r);
+ TEST_ASSERT(retval == PTHREAD_CANCELED,
+ "expected retval=%p, got %p", PTHREAD_CANCELED,
+ retval);
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu[3];
+ unsigned int exit_reason;
+ pthread_t threads[2];
+ vm_vaddr_t test_data_page, gva;
+ vm_paddr_t gpa;
+ uint64_t *pte;
+ struct test_data *data;
+ struct ucall uc;
+ int stage = 1, r, i;
+
+ vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code);
+
+ /* Test data page */
+ test_data_page = vm_vaddr_alloc_page(vm);
+ data = (struct test_data *)addr_gva2hva(vm, test_data_page);
+
+ /* Hypercall input/output */
+ data->hcall_gva = vm_vaddr_alloc_pages(vm, 2);
+ data->hcall_gpa = addr_gva2gpa(vm, data->hcall_gva);
+ memset(addr_gva2hva(vm, data->hcall_gva), 0x0, 2 * PAGE_SIZE);
+
+ /*
+ * Test pages: the first one is filled with '0x01's, the second with '0x02's
+ * and the test will swap their mappings. The third page keeps the indication
+ * about the current state of mappings.
+ */
+ data->test_pages = vm_vaddr_alloc_pages(vm, NTEST_PAGES + 1);
+ for (i = 0; i < NTEST_PAGES; i++)
+ memset(addr_gva2hva(vm, data->test_pages + PAGE_SIZE * i),
+ (u8)(i + 1), PAGE_SIZE);
+ set_expected_val(addr_gva2hva(vm, data->test_pages), 0x0, WORKER_VCPU_ID_1);
+ set_expected_val(addr_gva2hva(vm, data->test_pages), 0x0, WORKER_VCPU_ID_2);
+
+ /*
+ * Get PTE pointers for test pages and map them inside the guest.
+ * Use separate page for each PTE for simplicity.
+ */
+ gva = vm_vaddr_unused_gap(vm, NTEST_PAGES * PAGE_SIZE, KVM_UTIL_MIN_VADDR);
+ for (i = 0; i < NTEST_PAGES; i++) {
+ pte = vm_get_page_table_entry(vm, data->test_pages + i * PAGE_SIZE);
+ gpa = addr_hva2gpa(vm, pte);
+ __virt_pg_map(vm, gva + PAGE_SIZE * i, gpa & PAGE_MASK, PG_LEVEL_4K);
+ data->test_pages_pte[i] = gva + (gpa & ~PAGE_MASK);
+ }
+
+ /*
+ * Sender vCPU which performs the test: swaps test pages, sets expectation
+ * for 'workers' and issues TLB flush hypercalls.
+ */
+ vcpu_args_set(vcpu[0], 1, test_data_page);
+ vcpu_set_hv_cpuid(vcpu[0]);
+
+ /* Create worker vCPUs which check the contents of the test pages */
+ vcpu[1] = vm_vcpu_add(vm, WORKER_VCPU_ID_1, worker_guest_code);
+ vcpu_args_set(vcpu[1], 1, test_data_page);
+ vcpu_set_msr(vcpu[1], HV_X64_MSR_VP_INDEX, WORKER_VCPU_ID_1);
+ vcpu_set_hv_cpuid(vcpu[1]);
+
+ vcpu[2] = vm_vcpu_add(vm, WORKER_VCPU_ID_2, worker_guest_code);
+ vcpu_args_set(vcpu[2], 1, test_data_page);
+ vcpu_set_msr(vcpu[2], HV_X64_MSR_VP_INDEX, WORKER_VCPU_ID_2);
+ vcpu_set_hv_cpuid(vcpu[2]);
+
+ r = pthread_create(&threads[0], NULL, vcpu_thread, vcpu[1]);
+ TEST_ASSERT(!r, "pthread_create() failed");
+
+ r = pthread_create(&threads[1], NULL, vcpu_thread, vcpu[2]);
+ TEST_ASSERT(!r, "pthread_create() failed");
+
+ while (true) {
+ vcpu_run(vcpu[0]);
+ exit_reason = vcpu[0]->run->exit_reason;
+
+ TEST_ASSERT(exit_reason == KVM_EXIT_IO,
+ "unexpected exit reason: %u (%s)",
+ exit_reason, exit_reason_str(exit_reason));
+
+ switch (get_ucall(vcpu[0], &uc)) {
+ case UCALL_SYNC:
+ TEST_ASSERT(uc.args[1] == stage,
+ "Unexpected stage: %ld (%d expected)\n",
+ uc.args[1], stage);
+ break;
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ /* NOT REACHED */
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+
+ stage++;
+ }
+
+done:
+ cancel_join_vcpu_thread(threads[0], vcpu[1]);
+ cancel_join_vcpu_thread(threads[1], vcpu[2]);
+ kvm_vm_free(vm);
+
+ return 0;
+}
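The control word passed to hyperv_hypercall() above packs several fields into one u64: the call code in bits 15:0, the fast-transfer bit at bit 16 (inputs in registers/XMM rather than memory), and, for rep hypercalls, the repetition count in bits 43:32 (hence HV_HYPERCALL_REP_COMP_OFFSET). A minimal sketch of that encoding, with the constants restated per the Hyper-V TLFS rather than taken from the test's headers:

#include <stdint.h>

#define HV_FAST_BIT        (1ULL << 16)  /* inputs in registers/XMM */
#define HV_REP_COMP_OFFSET 32            /* rep count lives in bits 43:32 */

/* Build the control word for a fast rep hypercall with 'nrep' elements. */
static inline uint64_t hv_control(uint16_t call_code, uint64_t nrep)
{
	return (uint64_t)call_code | HV_FAST_BIT |
	       (nrep << HV_REP_COMP_OFFSET);
}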
diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
index 59ffe7fd354f..ea0978f22db8 100644
--- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
+++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
@@ -241,10 +241,10 @@ int main(int argc, char **argv)
while ((opt = getopt(argc, argv, "hp:t:r")) != -1) {
switch (opt) {
case 'p':
- reclaim_period_ms = atoi(optarg);
+ reclaim_period_ms = atoi_non_negative("Reclaim period", optarg);
break;
case 't':
- token = atoi(optarg);
+ token = atoi_paranoid(optarg);
break;
case 'r':
reboot_permissions = true;
@@ -257,7 +257,6 @@ int main(int argc, char **argv)
}
TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_DISABLE_NX_HUGE_PAGES));
- TEST_REQUIRE(reclaim_period_ms > 0);
__TEST_REQUIRE(token == MAGIC_TOKEN,
"This test must be run with the magic token %d.\n"
diff --git a/tools/testing/selftests/kvm/x86_64/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
index 76417c7d687b..310a104d94f0 100644
--- a/tools/testing/selftests/kvm/x86_64/platform_info_test.c
+++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
@@ -72,9 +72,6 @@ int main(int argc, char *argv[])
struct kvm_vm *vm;
uint64_t msr_platform_info;
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
TEST_REQUIRE(kvm_has_cap(KVM_CAP_MSR_PLATFORM_INFO));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
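This and several of the following patches delete the same setbuf(stdout, NULL) boilerplate, which suggests the unbuffering moved into shared startup code run by every test. One common shape for that, sketched under the assumption of GCC/clang constructor support (the helper name here is hypothetical):

#include <stdio.h>

/* Runs before main() in every binary linked against the test library. */
static void __attribute__((constructor)) selftest_init(void)
{
	/* Tell stdout not to buffer its content. */
	setbuf(stdout, NULL);
}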
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index ea4e259a1e2e..2de98fce7edd 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -21,29 +21,6 @@
#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
-union cpuid10_eax {
- struct {
- unsigned int version_id:8;
- unsigned int num_counters:8;
- unsigned int bit_width:8;
- unsigned int mask_length:8;
- } split;
- unsigned int full;
-};
-
-union cpuid10_ebx {
- struct {
- unsigned int no_unhalted_core_cycles:1;
- unsigned int no_instructions_retired:1;
- unsigned int no_unhalted_reference_cycles:1;
- unsigned int no_llc_reference:1;
- unsigned int no_llc_misses:1;
- unsigned int no_branch_instruction_retired:1;
- unsigned int no_branch_misses_retired:1;
- } split;
- unsigned int full;
-};
-
/* End of stuff taken from perf_event.h. */
/* Oddly, this isn't in perf_event.h. */
@@ -380,46 +357,31 @@ static void test_pmu_config_disable(void (*guest_code)(void))
}
/*
- * Check for a non-zero PMU version, at least one general-purpose
- * counter per logical processor, an EBX bit vector of length greater
- * than 5, and EBX[5] clear.
- */
-static bool check_intel_pmu_leaf(const struct kvm_cpuid_entry2 *entry)
-{
- union cpuid10_eax eax = { .full = entry->eax };
- union cpuid10_ebx ebx = { .full = entry->ebx };
-
- return eax.split.version_id && eax.split.num_counters > 0 &&
- eax.split.mask_length > ARCH_PERFMON_BRANCHES_RETIRED &&
- !ebx.split.no_branch_instruction_retired;
-}
-
-/*
- * Note that CPUID leaf 0xa is Intel-specific. This leaf should be
- * clear on AMD hardware.
+ * On Intel, check for a non-zero PMU version, at least one general-purpose
+ * counter per logical processor, and support for counting the number of branch
+ * instructions retired.
*/
static bool use_intel_pmu(void)
{
- const struct kvm_cpuid_entry2 *entry;
-
- entry = kvm_get_supported_cpuid_entry(0xa);
- return is_intel_cpu() && check_intel_pmu_leaf(entry);
+ return is_intel_cpu() &&
+ kvm_cpu_property(X86_PROPERTY_PMU_VERSION) &&
+ kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS) &&
+ kvm_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED);
}
-static bool is_zen1(uint32_t eax)
+static bool is_zen1(uint32_t family, uint32_t model)
{
- return x86_family(eax) == 0x17 && x86_model(eax) <= 0x0f;
+ return family == 0x17 && model <= 0x0f;
}
-static bool is_zen2(uint32_t eax)
+static bool is_zen2(uint32_t family, uint32_t model)
{
- return x86_family(eax) == 0x17 &&
- x86_model(eax) >= 0x30 && x86_model(eax) <= 0x3f;
+ return family == 0x17 && model >= 0x30 && model <= 0x3f;
}
-static bool is_zen3(uint32_t eax)
+static bool is_zen3(uint32_t family, uint32_t model)
{
- return x86_family(eax) == 0x19 && x86_model(eax) <= 0x0f;
+ return family == 0x19 && model <= 0x0f;
}
/*
@@ -432,13 +394,13 @@ static bool is_zen3(uint32_t eax)
*/
static bool use_amd_pmu(void)
{
- const struct kvm_cpuid_entry2 *entry;
+ uint32_t family = kvm_cpu_family();
+ uint32_t model = kvm_cpu_model();
- entry = kvm_get_supported_cpuid_entry(1);
return is_amd_cpu() &&
- (is_zen1(entry->eax) ||
- is_zen2(entry->eax) ||
- is_zen3(entry->eax));
+ (is_zen1(family, model) ||
+ is_zen2(family, model) ||
+ is_zen3(family, model));
}
int main(int argc, char *argv[])
@@ -447,9 +409,6 @@ int main(int argc, char *argv[])
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
TEST_REQUIRE(kvm_has_cap(KVM_CAP_PMU_EVENT_FILTER));
TEST_REQUIRE(use_intel_pmu() || use_amd_pmu());
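kvm_cpu_family() and kvm_cpu_model() hide the CPUID.01H:EAX decoding that the old x86_family()/x86_model() callers applied to the raw leaf. For reference, the architectural decode folds the extended fields into the base ones; a sketch of that rule, not the helpers' exact source:

#include <stdint.h>

static uint32_t cpuid1_family(uint32_t eax)
{
	uint32_t family = (eax >> 8) & 0xf;

	/* The extended family field only applies when base family == 0xf. */
	if (family == 0xf)
		family += (eax >> 20) & 0xff;
	return family;
}

static uint32_t cpuid1_model(uint32_t eax)
{
	uint32_t model = (eax >> 4) & 0xf;

	/* The extended model field extends families 0x6 and up. */
	if (cpuid1_family(eax) >= 0x6)
		model += ((eax >> 16) & 0xf) << 4;
	return model;
}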
diff --git a/tools/testing/selftests/kvm/x86_64/set_sregs_test.c b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
index 2bb08bf2125d..a284fcef6ed7 100644
--- a/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
@@ -82,9 +82,6 @@ int main(int argc, char *argv[])
uint64_t cr4;
int rc;
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
/*
* Create a dummy VM, specifically to avoid doing KVM_SET_CPUID2, and
* use it to verify all supported CR4 bits can be set prior to defining
diff --git a/tools/testing/selftests/kvm/x86_64/smaller_maxphyaddr_emulation_test.c b/tools/testing/selftests/kvm/x86_64/smaller_maxphyaddr_emulation_test.c
new file mode 100644
index 000000000000..06edf00a97d6
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/smaller_maxphyaddr_emulation_test.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Google LLC.
+ *
+ * Test that KVM emulates instructions in response to EPT violations when
+ * allow_smaller_maxphyaddr is enabled and guest.MAXPHYADDR < host.MAXPHYADDR.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+
+#include "flds_emulation.h"
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "vmx.h"
+
+#define MAXPHYADDR 36
+
+#define MEM_REGION_GVA 0x0000123456789000
+#define MEM_REGION_GPA 0x0000000700000000
+#define MEM_REGION_SLOT 10
+#define MEM_REGION_SIZE PAGE_SIZE
+
+static void guest_code(bool tdp_enabled)
+{
+ uint64_t error_code;
+ uint64_t vector;
+
+ vector = kvm_asm_safe_ec(FLDS_MEM_EAX, error_code, "a"(MEM_REGION_GVA));
+
+ /*
+ * When TDP is enabled, flds will trigger an emulation failure, exit to
+ * userspace, and then the selftest host "VMM" skips the instruction.
+ *
+ * When TDP is disabled, no instruction emulation is required so flds
+ * should generate #PF(RSVD).
+ */
+ if (tdp_enabled) {
+ GUEST_ASSERT(!vector);
+ } else {
+ GUEST_ASSERT_EQ(vector, PF_VECTOR);
+ GUEST_ASSERT(error_code & PFERR_RSVD_MASK);
+ }
+
+ GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ struct ucall uc;
+ uint64_t *pte;
+ uint64_t *hva;
+ uint64_t gpa;
+ int rc;
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_SMALLER_MAXPHYADDR));
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ vcpu_args_set(vcpu, 1, kvm_is_tdp_enabled());
+
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vcpu);
+
+ vcpu_set_cpuid_maxphyaddr(vcpu, MAXPHYADDR);
+
+ rc = kvm_check_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE);
+ TEST_ASSERT(rc, "KVM_CAP_EXIT_ON_EMULATION_FAILURE is unavailable");
+ vm_enable_cap(vm, KVM_CAP_EXIT_ON_EMULATION_FAILURE, 1);
+
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+ MEM_REGION_GPA, MEM_REGION_SLOT,
+ MEM_REGION_SIZE / PAGE_SIZE, 0);
+ gpa = vm_phy_pages_alloc(vm, MEM_REGION_SIZE / PAGE_SIZE,
+ MEM_REGION_GPA, MEM_REGION_SLOT);
+ TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
+ virt_map(vm, MEM_REGION_GVA, MEM_REGION_GPA, 1);
+ hva = addr_gpa2hva(vm, MEM_REGION_GPA);
+ memset(hva, 0, PAGE_SIZE);
+
+ pte = vm_get_page_table_entry(vm, MEM_REGION_GVA);
+ *pte |= BIT_ULL(MAXPHYADDR);
+
+ vcpu_run(vcpu);
+
+ /*
+ * When TDP is enabled, KVM must emulate the access to a guest physical
+ * address that is illegal from the guest's perspective, but legal
+ * from hardware's perspective. This should result in an emulation
+ * failure exit to userspace since KVM doesn't support emulating flds.
+ */
+ if (kvm_is_tdp_enabled()) {
+ handle_flds_emulation_failure_exit(vcpu);
+ vcpu_run(vcpu);
+ }
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ break;
+ case UCALL_DONE:
+ break;
+ default:
+ TEST_FAIL("Unrecognized ucall: %lu\n", uc.cmd);
+ }
+
+ kvm_vm_free(vm);
+
+ return 0;
+}
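Setting a PTE bit at MAXPHYADDR works because physical-address bits the guest does not have are reserved in its page tables: an access through such a PTE must fault with PFERR_RSVD set, and when the host is wider than the guest, KVM has to intervene to preserve that illusion. A sketch of the reserved-bit mask for a given guest width, assuming the usual 52-bit architectural limit:

#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

/* PTE bits [51:guest_maxphyaddr] are reserved from the guest's view. */
static uint64_t guest_rsvd_pa_mask(int guest_maxphyaddr)
{
	return (BIT_ULL(52) - 1) & ~(BIT_ULL(guest_maxphyaddr) - 1);
}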
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
index 1f136a81858e..cb38a478e1f6 100644
--- a/tools/testing/selftests/kvm/x86_64/smm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/smm_test.c
@@ -137,6 +137,8 @@ int main(int argc, char *argv[])
struct kvm_x86_state *state;
int stage, stage_reported;
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_SMM));
+
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
diff --git a/tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c b/tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c
new file mode 100644
index 000000000000..e73fcdef47bb
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * svm_nested_shutdown_test
+ *
+ * Copyright (C) 2022, Red Hat, Inc.
+ *
+ * Nested SVM testing: test that unintercepted shutdown in L2 doesn't crash the host
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+
+static void l2_guest_code(struct svm_test_data *svm)
+{
+ __asm__ __volatile__("ud2");
+}
+
+static void l1_guest_code(struct svm_test_data *svm, struct idt_entry *idt)
+{
+ #define L2_GUEST_STACK_SIZE 64
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ struct vmcb *vmcb = svm->vmcb;
+
+ generic_svm_setup(svm, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ vmcb->control.intercept &= ~(BIT(INTERCEPT_SHUTDOWN));
+
+ idt[6].p = 0; // #UD is intercepted but its injection will cause #NP
+ idt[11].p = 0; // #NP is not intercepted and will cause another
+ // #NP that will be converted to #DF
+ idt[8].p = 0; // #DF will cause #NP which will cause SHUTDOWN
+
+ run_guest(vmcb, svm->vmcb_gpa);
+
+ /* should not reach here */
+ GUEST_ASSERT(0);
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_run *run;
+ vm_vaddr_t svm_gva;
+ struct kvm_vm *vm;
+
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
+
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vcpu);
+
+ vcpu_alloc_svm(vm, &svm_gva);
+
+ vcpu_args_set(vcpu, 2, svm_gva, vm->idt);
+ run = vcpu->run;
+
+ vcpu_run(vcpu);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
+ "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ kvm_vm_free(vm);
+}
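The three cleared Present bits set up a deliberate escalation: delivering the ud2-induced #UD through a non-present gate raises #NP, delivering that #NP raises #NP again and the contributory pair escalates to #DF, and a non-present #DF gate turns the whole thing into a triple fault, i.e. SHUTDOWN. With INTERCEPT_SHUTDOWN cleared, the shutdown is L1's to absorb rather than an exit back to KVM. The same setup restated as a self-contained sketch with named vectors (the gate struct is a minimal stand-in, not the selftests' real idt_entry):

#define UD_VECTOR 6	/* ud2 */
#define DF_VECTOR 8	/* double fault */
#define NP_VECTOR 11	/* segment not present */

/* Minimal stand-in for an IDT gate descriptor; only Present matters here. */
struct idt_gate { unsigned p : 1; };

static void arm_shutdown_cascade(struct idt_gate *idt)
{
	idt[UD_VECTOR].p = 0;	/* delivering #UD hits a non-present gate -> #NP */
	idt[NP_VECTOR].p = 0;	/* delivering #NP -> #NP again -> #DF */
	idt[DF_VECTOR].p = 0;	/* delivering #DF fails -> triple fault/SHUTDOWN */
}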
diff --git a/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c b/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c
index e637d7736012..e497ace629c1 100644
--- a/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c
+++ b/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c
@@ -194,9 +194,6 @@ done:
int main(int argc, char *argv[])
{
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
TEST_ASSERT(kvm_cpu_has(X86_FEATURE_NRIPS),
diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
index 9b6db0b0b13e..d2f9b5bdfab2 100644
--- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
@@ -90,9 +90,6 @@ int main(int argc, char *argv[])
struct kvm_vcpu_events events;
int rv, cap;
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
TEST_REQUIRE((cap & TEST_SYNC_FIELDS) == TEST_SYNC_FIELDS);
TEST_REQUIRE(!(cap & INVALID_SYNC_FIELD));
diff --git a/tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c b/tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c
index 70b44f0b52fe..ead5d878a71c 100644
--- a/tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c
+++ b/tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c
@@ -3,6 +3,7 @@
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
+#include "svm_util.h"
#include <string.h>
#include <sys/ioctl.h>
@@ -20,10 +21,11 @@ static void l2_guest_code(void)
: : [port] "d" (ARBITRARY_IO_PORT) : "rax");
}
-void l1_guest_code(struct vmx_pages *vmx)
-{
#define L2_GUEST_STACK_SIZE 64
- unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+void l1_guest_code_vmx(struct vmx_pages *vmx)
+{
GUEST_ASSERT(vmx->vmcs_gpa);
GUEST_ASSERT(prepare_for_vmx_operation(vmx));
@@ -38,24 +40,53 @@ void l1_guest_code(struct vmx_pages *vmx)
GUEST_DONE();
}
+void l1_guest_code_svm(struct svm_test_data *svm)
+{
+ struct vmcb *vmcb = svm->vmcb;
+
+ generic_svm_setup(svm, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ /* don't intercept shutdown, to test the case where SVM allows it */
+ vmcb->control.intercept &= ~(BIT(INTERCEPT_SHUTDOWN));
+
+ run_guest(vmcb, svm->vmcb_gpa);
+
+ /* should not reach here, L1 should crash */
+ GUEST_ASSERT(0);
+}
+
int main(void)
{
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vcpu_events events;
- vm_vaddr_t vmx_pages_gva;
struct ucall uc;
- TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+ bool has_vmx = kvm_cpu_has(X86_FEATURE_VMX);
+ bool has_svm = kvm_cpu_has(X86_FEATURE_SVM);
+
+ TEST_REQUIRE(has_vmx || has_svm);
TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_TRIPLE_FAULT_EVENT));
- vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
- vm_enable_cap(vm, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 1);
+ if (has_vmx) {
+ vm_vaddr_t vmx_pages_gva;
+
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_vmx);
+ vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
+ } else {
+ vm_vaddr_t svm_gva;
+
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_svm);
+ vcpu_alloc_svm(vm, &svm_gva);
+ vcpu_args_set(vcpu, 1, svm_gva);
+ }
+
+ vm_enable_cap(vm, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 1);
run = vcpu->run;
- vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vcpu, 1, vmx_pages_gva);
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
@@ -78,13 +109,21 @@ int main(void)
"No triple fault pending");
vcpu_run(vcpu);
- switch (get_ucall(vcpu, &uc)) {
- case UCALL_DONE:
- break;
- case UCALL_ABORT:
- REPORT_GUEST_ASSERT(uc);
- default:
- TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
- }
+ if (has_svm) {
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
+ "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+ } else {
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_DONE:
+ break;
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ default:
+ TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
+ }
+ }
+ return 0;
}
diff --git a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
index 7316521428f8..91076c9787b4 100644
--- a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
+++ b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
@@ -56,9 +56,6 @@ int main(int argc, char *argv[])
struct kvm_vm *vm;
struct ucall uc;
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run;
diff --git a/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c b/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c
index a4f06370a245..25fa55344a10 100644
--- a/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c
+++ b/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c
@@ -733,16 +733,98 @@ static void test_msr_permission_bitmap(void)
kvm_vm_free(vm);
}
-int main(int argc, char *argv[])
+#define test_user_exit_msr_ioctl(vm, cmd, arg, flag, valid_mask) \
+({ \
+ int r = __vm_ioctl(vm, cmd, arg); \
+ \
+ if (flag & valid_mask) \
+ TEST_ASSERT(!r, __KVM_IOCTL_ERROR(#cmd, r)); \
+ else \
+ TEST_ASSERT(r == -1 && errno == EINVAL, \
+ "Wanted EINVAL for %s with flag = 0x%llx, got rc: %i errno: %i (%s)", \
+ #cmd, flag, r, errno, strerror(errno)); \
+})
+
+static void run_user_space_msr_flag_test(struct kvm_vm *vm)
{
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
+ struct kvm_enable_cap cap = { .cap = KVM_CAP_X86_USER_SPACE_MSR };
+ int nflags = sizeof(cap.args[0]) * BITS_PER_BYTE;
+ int rc;
+ int i;
+
+ rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
+ TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
+
+ for (i = 0; i < nflags; i++) {
+ cap.args[0] = BIT_ULL(i);
+ test_user_exit_msr_ioctl(vm, KVM_ENABLE_CAP, &cap,
+ BIT_ULL(i), KVM_MSR_EXIT_REASON_VALID_MASK);
+ }
+}
+
+static void run_msr_filter_flag_test(struct kvm_vm *vm)
+{
+ u64 deny_bits = 0;
+ struct kvm_msr_filter filter = {
+ .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
+ .ranges = {
+ {
+ .flags = KVM_MSR_FILTER_READ,
+ .nmsrs = 1,
+ .base = 0,
+ .bitmap = (uint8_t *)&deny_bits,
+ },
+ },
+ };
+ int nflags;
+ int rc;
+ int i;
+
+ rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
+ TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
+
+ nflags = sizeof(filter.flags) * BITS_PER_BYTE;
+ for (i = 0; i < nflags; i++) {
+ filter.flags = BIT_ULL(i);
+ test_user_exit_msr_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter,
+ BIT_ULL(i), KVM_MSR_FILTER_VALID_MASK);
+ }
+ filter.flags = KVM_MSR_FILTER_DEFAULT_ALLOW;
+ nflags = sizeof(filter.ranges[0].flags) * BITS_PER_BYTE;
+ for (i = 0; i < nflags; i++) {
+ filter.ranges[0].flags = BIT_ULL(i);
+ test_user_exit_msr_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter,
+ BIT_ULL(i), KVM_MSR_FILTER_RANGE_VALID_MASK);
+ }
+}
+
+/* Test that attempts to write to the unused bits in a flag fail. */
+static void test_user_exit_msr_flags(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ vm = vm_create_with_one_vcpu(&vcpu, NULL);
+
+ /* Test flags for KVM_CAP_X86_USER_SPACE_MSR. */
+ run_user_space_msr_flag_test(vm);
+
+ /* Test flags and range flags for KVM_X86_SET_MSR_FILTER. */
+ run_msr_filter_flag_test(vm);
+
+ kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
test_msr_filter_allow();
test_msr_filter_deny();
test_msr_permission_bitmap();
+ test_user_exit_msr_flags();
+
return 0;
}
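Both new helpers apply the same sweep: try every bit position of a UAPI flags field, expect success exactly for the bits inside the advertised valid mask, and EINVAL for the rest. The pattern, distilled into a standalone sketch where try() is a hypothetical stand-in for the ioctl under test:

#include <stdint.h>

/* try() returns 0 when the kernel accepted 'flag', nonzero otherwise. */
static int sweep_flag_bits(int (*try)(uint64_t flag),
			   uint64_t valid_mask, int nbits)
{
	for (int i = 0; i < nbits; i++) {
		uint64_t flag = 1ULL << i;

		/* The kernel must accept the bit iff it is in the mask. */
		if (!try(flag) != !!(flag & valid_mask))
			return -1;
	}
	return 0;
}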
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
index 2d8c23d639f7..f0456fb031b1 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
@@ -78,6 +78,7 @@ int main(int argc, char *argv[])
bool done = false;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+ TEST_REQUIRE(kvm_cpu_has_ept());
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
index 069589c52f41..c280ba1e6572 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
@@ -20,16 +20,6 @@
#define PMU_CAP_FW_WRITES (1ULL << 13)
#define PMU_CAP_LBR_FMT 0x3f
-union cpuid10_eax {
- struct {
- unsigned int version_id:8;
- unsigned int num_counters:8;
- unsigned int bit_width:8;
- unsigned int mask_length:8;
- } split;
- unsigned int full;
-};
-
union perf_capabilities {
struct {
u64 lbr_format:6;
@@ -53,11 +43,9 @@ static void guest_code(void)
int main(int argc, char *argv[])
{
- const struct kvm_cpuid_entry2 *entry_a_0;
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
int ret;
- union cpuid10_eax eax;
union perf_capabilities host_cap;
uint64_t val;
@@ -69,11 +57,8 @@ int main(int argc, char *argv[])
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));
- TEST_REQUIRE(kvm_get_cpuid_max_basic() >= 0xa);
- entry_a_0 = kvm_get_supported_cpuid_entry(0xa);
-
- eax.full = entry_a_0->eax;
- __TEST_REQUIRE(eax.split.version_id, "PMU is not supported by the vCPU");
+ TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION));
+ TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0);
/* testcase 1, set capabilities when we have PDCM bit */
vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES);
diff --git a/tools/testing/selftests/kvm/x86_64/xapic_state_test.c b/tools/testing/selftests/kvm/x86_64/xapic_state_test.c
index 6f7a5ef66718..d7d37dae3eeb 100644
--- a/tools/testing/selftests/kvm/x86_64/xapic_state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xapic_state_test.c
@@ -114,7 +114,9 @@ static void test_icr(struct xapic_vcpu *x)
* vCPUs, not vcpu.id + 1. Arbitrarily use vector 0xff.
*/
icr = APIC_INT_ASSERT | 0xff;
- for (i = vcpu->id + 1; i < 0xff; i++) {
+ for (i = 0; i < 0xff; i++) {
+ if (i == vcpu->id)
+ continue;
for (j = 0; j < 8; j++)
__test_icr(x, i << (32 + 24) | icr | (j << 8));
}
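The shift arithmetic in the loop writes straight into the xAPIC ICR layout: the destination APIC ID sits in bits 63:56 of the combined 64-bit value (hence i << (32 + 24)), the delivery mode in bits 10:8 (j << 8), and the vector in bits 7:0. A sketch of the composition:

#include <stdint.h>

#define APIC_INT_ASSERT (1ULL << 14)	/* level: assert */

static uint64_t make_xapic_icr(uint8_t dest, uint8_t delivery_mode,
			       uint8_t vector)
{
	return ((uint64_t)dest << 56) |		/* destination, bits 63:56 */
	       ((uint64_t)delivery_mode << 8) |	/* delivery mode, bits 10:8 */
	       APIC_INT_ASSERT |
	       vector;				/* vector, bits 7:0 */
}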
diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
index 8a5cb800f50e..2a5727188c8d 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -15,9 +15,13 @@
#include <time.h>
#include <sched.h>
#include <signal.h>
+#include <pthread.h>
#include <sys/eventfd.h>
+/* Defined in include/linux/kvm_types.h */
+#define GPA_INVALID (~(ulong)0)
+
#define SHINFO_REGION_GVA 0xc0000000ULL
#define SHINFO_REGION_GPA 0xc0000000ULL
#define SHINFO_REGION_SLOT 10
@@ -44,6 +48,8 @@
#define MIN_STEAL_TIME 50000
+#define SHINFO_RACE_TIMEOUT 2 /* seconds */
+
#define __HYPERVISOR_set_timer_op 15
#define __HYPERVISOR_sched_op 29
#define __HYPERVISOR_event_channel_op 32
@@ -126,7 +132,7 @@ struct {
struct kvm_irq_routing_entry entries[2];
} irq_routes;
-bool guest_saw_irq;
+static volatile bool guest_saw_irq;
static void evtchn_handler(struct ex_regs *regs)
{
@@ -148,6 +154,7 @@ static void guest_wait_for_irq(void)
static void guest_code(void)
{
struct vcpu_runstate_info *rs = (void *)RUNSTATE_VADDR;
+ int i;
__asm__ __volatile__(
"sti\n"
@@ -325,6 +332,49 @@ static void guest_code(void)
guest_wait_for_irq();
GUEST_SYNC(21);
+ /* Racing host ioctls */
+
+ guest_wait_for_irq();
+
+ GUEST_SYNC(22);
+ /* Racing vmcall against host ioctl */
+
+ ports[0] = 0;
+
+ p = (struct sched_poll) {
+ .ports = ports,
+ .nr_ports = 1,
+ .timeout = 0
+ };
+
+wait_for_timer:
+ /*
+ * Poll for a timer wake event while the worker thread is mucking with
+ * the shared info. KVM XEN drops timer IRQs if the shared info is
+ * invalid when the timer expires. Arbitrarily poll 100 times before
+ * giving up and asking the VMM to re-arm the timer. 100 polls should
+ * consume enough time to beat on KVM without taking too long if the
+ * timer IRQ is dropped due to an invalid event channel.
+ */
+ for (i = 0; i < 100 && !guest_saw_irq; i++)
+ asm volatile("vmcall"
+ : "=a" (rax)
+ : "a" (__HYPERVISOR_sched_op),
+ "D" (SCHEDOP_poll),
+ "S" (&p)
+ : "memory");
+
+ /*
+ * Re-send the timer IRQ if it was (likely) dropped due to the timer
+ * expiring while the event channel was invalid.
+ */
+ if (!guest_saw_irq) {
+ GUEST_SYNC(23);
+ goto wait_for_timer;
+ }
+ guest_saw_irq = false;
+
+ GUEST_SYNC(24);
}
static int cmp_timespec(struct timespec *a, struct timespec *b)
@@ -352,11 +402,36 @@ static void handle_alrm(int sig)
TEST_FAIL("IRQ delivery timed out");
}
+static void *juggle_shinfo_state(void *arg)
+{
+ struct kvm_vm *vm = (struct kvm_vm *)arg;
+
+ struct kvm_xen_hvm_attr cache_init = {
+ .type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
+ .u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE
+ };
+
+ struct kvm_xen_hvm_attr cache_destroy = {
+ .type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
+ .u.shared_info.gfn = GPA_INVALID
+ };
+
+ for (;;) {
+ __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_init);
+ __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_destroy);
+ pthread_testcancel();
+ };
+
+ return NULL;
+}
+
int main(int argc, char *argv[])
{
struct timespec min_ts, max_ts, vm_ts;
struct kvm_vm *vm;
+ pthread_t thread;
bool verbose;
+ int ret;
verbose = argc > 1 && (!strncmp(argv[1], "-v", 3) ||
!strncmp(argv[1], "--verbose", 10));
@@ -785,6 +860,71 @@ int main(int argc, char *argv[])
case 21:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
+ alarm(0);
+
+ if (verbose)
+ printf("Testing shinfo lock corruption (KVM_XEN_HVM_EVTCHN_SEND)\n");
+
+ ret = pthread_create(&thread, NULL, &juggle_shinfo_state, (void *)vm);
+ TEST_ASSERT(ret == 0, "pthread_create() failed: %s", strerror(ret));
+
+ struct kvm_irq_routing_xen_evtchn uxe = {
+ .port = 1,
+ .vcpu = vcpu->id,
+ .priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL
+ };
+
+ evtchn_irq_expected = true;
+ for (time_t t = time(NULL) + SHINFO_RACE_TIMEOUT; time(NULL) < t;)
+ __vm_ioctl(vm, KVM_XEN_HVM_EVTCHN_SEND, &uxe);
+ break;
+
+ case 22:
+ TEST_ASSERT(!evtchn_irq_expected,
+ "Expected event channel IRQ but it didn't happen");
+
+ if (verbose)
+ printf("Testing shinfo lock corruption (SCHEDOP_poll)\n");
+
+ shinfo->evtchn_pending[0] = 1;
+
+ evtchn_irq_expected = true;
+ tmr.u.timer.expires_ns = rs->state_entry_time +
+ SHINFO_RACE_TIMEOUT * 1000000000ULL;
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
+ break;
+
+ case 23:
+ /*
+ * Optional and possibly repeated sync point.
+ * Injecting the timer IRQ may fail if the
+ * shinfo is invalid when the timer expires.
+ * If the timer has expired but the IRQ hasn't
+ * been delivered, rearm the timer and retry.
+ */
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
+
+ /* Resume the guest if the timer is still pending. */
+ if (tmr.u.timer.expires_ns)
+ break;
+
+ /* All done if the IRQ was delivered. */
+ if (!evtchn_irq_expected)
+ break;
+
+ tmr.u.timer.expires_ns = rs->state_entry_time +
+ SHINFO_RACE_TIMEOUT * 1000000000ULL;
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
+ break;
+ case 24:
+ TEST_ASSERT(!evtchn_irq_expected,
+ "Expected event channel IRQ but it didn't happen");
+
+ ret = pthread_cancel(thread);
+ TEST_ASSERT(ret == 0, "pthread_cancel() failed: %s", strerror(ret));
+
+ ret = pthread_join(thread, 0);
+ TEST_ASSERT(ret == 0, "pthread_join() failed: %s", strerror(ret));
goto done;
case 0x20:
diff --git a/tools/testing/selftests/landlock/Makefile b/tools/testing/selftests/landlock/Makefile
index 6632bfff486b..348e2dbdb4e0 100644
--- a/tools/testing/selftests/landlock/Makefile
+++ b/tools/testing/selftests/landlock/Makefile
@@ -3,7 +3,6 @@
# First run: make -C ../../../.. headers_install
CFLAGS += -Wall -O2 $(KHDR_INCLUDES)
-LDLIBS += -lcap
LOCAL_HDRS += common.h
@@ -13,10 +12,12 @@ TEST_GEN_PROGS := $(src_test:.c=)
TEST_GEN_PROGS_EXTENDED := true
-# Static linking for short targets:
+# Short targets:
+$(TEST_GEN_PROGS): LDLIBS += -lcap
$(TEST_GEN_PROGS_EXTENDED): LDFLAGS += -static
include ../lib.mk
-# Static linking for targets with $(OUTPUT)/ prefix:
+# Targets with $(OUTPUT)/ prefix:
+$(TEST_GEN_PROGS): LDLIBS += -lcap
$(TEST_GEN_PROGS_EXTENDED): LDFLAGS += -static
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 9d4cb94cf437..a3ea3d4a206d 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -70,7 +70,7 @@ endef
run_tests: all
ifdef building_out_of_srctree
@if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
- rsync -aq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
+ rsync -aLq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
fi
@if [ "X$(TEST_PROGS)" != "X" ]; then \
$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
@@ -84,7 +84,7 @@ endif
define INSTALL_SINGLE_RULE
$(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH))
- $(if $(INSTALL_LIST),rsync -a $(INSTALL_LIST) $(INSTALL_PATH)/)
+ $(if $(INSTALL_LIST),rsync -aL $(INSTALL_LIST) $(INSTALL_PATH)/)
endef
define INSTALL_RULE
diff --git a/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh b/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh
index 74ee5067a8ce..611be86eaf3d 100755
--- a/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh
+++ b/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh
@@ -138,7 +138,6 @@ online_all_offline_memory()
{
for memory in `hotpluggable_offline_memory`; do
if ! online_memory_expect_success $memory; then
- echo "$FUNCNAME $memory: unexpected fail" >&2
retval=1
fi
done
diff --git a/tools/testing/selftests/net/openvswitch/Makefile b/tools/testing/selftests/net/openvswitch/Makefile
new file mode 100644
index 000000000000..2f1508abc826
--- /dev/null
+++ b/tools/testing/selftests/net/openvswitch/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
+top_srcdir = ../../../../..
+
+CFLAGS = -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)
+
+TEST_PROGS := openvswitch.sh
+
+TEST_FILES := ovs-dpctl.py
+
+EXTRA_CLEAN := test_netlink_checks
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
new file mode 100755
index 000000000000..7ce46700a3ae
--- /dev/null
+++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
@@ -0,0 +1,218 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# OVS kernel module self tests
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+PAUSE_ON_FAIL=no
+VERBOSE=0
+TRACING=0
+
+tests="
+ netlink_checks ovsnl: validate netlink attrs and settings"
+
+info() {
+ [ $VERBOSE = 0 ] || echo $*
+}
+
+ovs_base=`pwd`
+sbxs=
+sbx_add () {
+ info "adding sandbox '$1'"
+
+ sbxs="$sbxs $1"
+
+ NO_BIN=0
+
+ # Create sandbox.
+ local d="$ovs_base"/$1
+ if [ -e $d ]; then
+ info "removing $d"
+ rm -rf "$d"
+ fi
+ mkdir "$d" || return 1
+ ovs_setenv $1
+}
+
+ovs_exit_sig() {
+ [ -e ${ovs_dir}/cleanup ] && . "$ovs_dir/cleanup"
+}
+
+on_exit() {
+ echo "$1" > ${ovs_dir}/cleanup.tmp
+ cat ${ovs_dir}/cleanup >> ${ovs_dir}/cleanup.tmp
+ mv ${ovs_dir}/cleanup.tmp ${ovs_dir}/cleanup
+}
+
+ovs_setenv() {
+ sandbox=$1
+
+ ovs_dir=$ovs_base${1:+/$1}; export ovs_dir
+
+ test -e ${ovs_dir}/cleanup || : > ${ovs_dir}/cleanup
+}
+
+ovs_sbx() {
+ if test "X$2" != X; then
+ (ovs_setenv $1; shift; "$@" >> ${ovs_dir}/debug.log)
+ else
+ ovs_setenv $1
+ fi
+}
+
+ovs_add_dp () {
+ info "Adding DP/Bridge IF: sbx:$1 dp:$2 {$3, $4, $5}"
+ sbxname="$1"
+ shift
+ ovs_sbx "$sbxname" python3 $ovs_base/ovs-dpctl.py add-dp $*
+ on_exit "ovs_sbx $sbxname python3 $ovs_base/ovs-dpctl.py del-dp $1;"
+}
+
+usage() {
+ echo
+ echo "$0 [OPTIONS] [TEST]..."
+ echo "If no TEST argument is given, all tests will be run."
+ echo
+ echo "Options"
+ echo " -t: capture traffic via tcpdump"
+ echo " -v: verbose"
+ echo " -p: pause on failure"
+ echo
+ echo "Available tests${tests}"
+ exit 1
+}
+
+# netlink_checks
+# - Create a dp
+# - check no warning with "old version" simulation
+test_netlink_checks () {
+ sbx_add "test_netlink_checks" || return 1
+
+ info "setting up new DP"
+ ovs_add_dp "test_netlink_checks" nv0 || return 1
+ # now try again
+ PRE_TEST=$(dmesg | grep -E "RIP: [0-9a-fA-Fx]+:ovs_dp_cmd_new\+")
+ ovs_add_dp "test_netlink_checks" nv0 -V 0 || return 1
+ POST_TEST=$(dmesg | grep -E "RIP: [0-9a-fA-Fx]+:ovs_dp_cmd_new\+")
+ if [ "$PRE_TEST" != "$POST_TEST" ]; then
+ info "failed - gen warning"
+ return 1
+ fi
+
+ return 0
+}
+
+run_test() {
+ (
+ tname="$1"
+ tdesc="$2"
+
+ if ! lsmod | grep openvswitch >/dev/null 2>&1; then
+ stdbuf -o0 printf "TEST: %-60s [NOMOD]\n" "${tdesc}"
+ return $ksft_skip
+ fi
+
+ if python3 ovs-dpctl.py -h 2>&1 | \
+ grep "Need to install the python" >/dev/null 2>&1; then
+ stdbuf -o0 printf "TEST: %-60s [PYLIB]\n" "${tdesc}"
+ return $ksft_skip
+ fi
+ printf "TEST: %-60s [START]\n" "${tname}"
+
+ unset IFS
+
+ eval test_${tname}
+ ret=$?
+
+ if [ $ret -eq 0 ]; then
+ printf "TEST: %-60s [ OK ]\n" "${tdesc}"
+ ovs_exit_sig
+ rm -rf "$ovs_dir"
+ elif [ $ret -eq 1 ]; then
+ printf "TEST: %-60s [FAIL]\n" "${tdesc}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "Pausing. Logs in $ovs_dir/. Hit enter to continue"
+ read a
+ fi
+ ovs_exit_sig
+ [ "${PAUSE_ON_FAIL}" = "yes" ] || rm -rf "$ovs_dir"
+ exit 1
+ elif [ $ret -eq $ksft_skip ]; then
+ printf "TEST: %-60s [SKIP]\n" "${tdesc}"
+ elif [ $ret -eq 2 ]; then
+ rm -rf test_${tname}
+ run_test "$1" "$2"
+ fi
+
+ return $ret
+ )
+ ret=$?
+ case $ret in
+ 0)
+ [ $all_skipped = true ] && [ $exitcode = $ksft_skip ] && exitcode=0
+ all_skipped=false
+ ;;
+ $ksft_skip)
+ [ $all_skipped = true ] && exitcode=$ksft_skip
+ ;;
+ *)
+ all_skipped=false
+ exitcode=1
+ ;;
+ esac
+
+ return $ret
+}
+
+
+exitcode=0
+desc=0
+all_skipped=true
+
+while getopts :pvt o
+do
+ case $o in
+ p) PAUSE_ON_FAIL=yes;;
+ v) VERBOSE=1;;
+ t) if which tcpdump > /dev/null 2>&1; then
+ TRACING=1
+ else
+ echo "=== tcpdump not available, tracing disabled"
+ fi
+ ;;
+ *) usage;;
+ esac
+done
+shift $(($OPTIND-1))
+
+IFS="
+"
+
+for arg do
+ # Check first that all requested tests are available before running any
+ command -v > /dev/null "test_${arg}" || { echo "=== Test ${arg} not found"; usage; }
+done
+
+name=""
+desc=""
+for t in ${tests}; do
+ [ "${name}" = "" ] && name="${t}" && continue
+ [ "${desc}" = "" ] && desc="${t}"
+
+ run_this=1
+ for arg do
+ [ "${arg}" != "${arg#--*}" ] && continue
+ [ "${arg}" = "${name}" ] && run_this=1 && break
+ run_this=0
+ done
+ if [ $run_this -eq 1 ]; then
+ run_test "${name}" "${desc}"
+ fi
+ name=""
+ desc=""
+done
+
+exit ${exitcode}
diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
new file mode 100644
index 000000000000..3243c90d449e
--- /dev/null
+++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
@@ -0,0 +1,351 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+# Controls the openvswitch module. Part of the kselftest suite, but
+# can be used for diagnostic purposes as well.
+
+import argparse
+import errno
+import sys
+
+try:
+ from pyroute2 import NDB
+
+ from pyroute2.netlink import NLM_F_ACK
+ from pyroute2.netlink import NLM_F_REQUEST
+ from pyroute2.netlink import genlmsg
+ from pyroute2.netlink import nla
+ from pyroute2.netlink.exceptions import NetlinkError
+ from pyroute2.netlink.generic import GenericNetlinkSocket
+except ModuleNotFoundError:
+ print("Need to install the python pyroute2 package.")
+ sys.exit(0)
+
+
+OVS_DATAPATH_FAMILY = "ovs_datapath"
+OVS_VPORT_FAMILY = "ovs_vport"
+OVS_FLOW_FAMILY = "ovs_flow"
+OVS_PACKET_FAMILY = "ovs_packet"
+OVS_METER_FAMILY = "ovs_meter"
+OVS_CT_LIMIT_FAMILY = "ovs_ct_limit"
+
+OVS_DATAPATH_VERSION = 2
+OVS_DP_CMD_NEW = 1
+OVS_DP_CMD_DEL = 2
+OVS_DP_CMD_GET = 3
+OVS_DP_CMD_SET = 4
+
+OVS_VPORT_CMD_NEW = 1
+OVS_VPORT_CMD_DEL = 2
+OVS_VPORT_CMD_GET = 3
+OVS_VPORT_CMD_SET = 4
+
+
+class ovs_dp_msg(genlmsg):
+ # include the OVS version
+ # We need a custom header rather than just being able to rely on
+ # genlmsg, because 'fields' ends up not expressing everything correctly
+ # if we use the canonical example of setting fields = (('customfield',),)
+ fields = genlmsg.fields + (("dpifindex", "I"),)
+
+
+class OvsDatapath(GenericNetlinkSocket):
+
+ OVS_DP_F_VPORT_PIDS = 1 << 1
+ OVS_DP_F_DISPATCH_UPCALL_PER_CPU = 1 << 3
+
+ class dp_cmd_msg(ovs_dp_msg):
+ """
+ Message class that will be used to communicate with the kernel module.
+ """
+
+ nla_map = (
+ ("OVS_DP_ATTR_UNSPEC", "none"),
+ ("OVS_DP_ATTR_NAME", "asciiz"),
+ ("OVS_DP_ATTR_UPCALL_PID", "uint32"),
+ ("OVS_DP_ATTR_STATS", "dpstats"),
+ ("OVS_DP_ATTR_MEGAFLOW_STATS", "megaflowstats"),
+ ("OVS_DP_ATTR_USER_FEATURES", "uint32"),
+ ("OVS_DP_ATTR_PAD", "none"),
+ ("OVS_DP_ATTR_MASKS_CACHE_SIZE", "uint32"),
+ ("OVS_DP_ATTR_PER_CPU_PIDS", "array(uint32)"),
+ )
+
+ class dpstats(nla):
+ fields = (
+ ("hit", "=Q"),
+ ("missed", "=Q"),
+ ("lost", "=Q"),
+ ("flows", "=Q"),
+ )
+
+ class megaflowstats(nla):
+ fields = (
+ ("mask_hit", "=Q"),
+ ("masks", "=I"),
+ ("padding", "=I"),
+ ("cache_hits", "=Q"),
+ ("pad1", "=Q"),
+ )
+
+ def __init__(self):
+ GenericNetlinkSocket.__init__(self)
+ self.bind(OVS_DATAPATH_FAMILY, OvsDatapath.dp_cmd_msg)
+
+ def info(self, dpname, ifindex=0):
+ msg = OvsDatapath.dp_cmd_msg()
+ msg["cmd"] = OVS_DP_CMD_GET
+ msg["version"] = OVS_DATAPATH_VERSION
+ msg["reserved"] = 0
+ msg["dpifindex"] = ifindex
+ msg["attrs"].append(["OVS_DP_ATTR_NAME", dpname])
+
+ try:
+ reply = self.nlm_request(
+ msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST
+ )
+ reply = reply[0]
+ except NetlinkError as ne:
+ if ne.code == errno.ENODEV:
+ reply = None
+ else:
+ raise ne
+
+ return reply
+
+ def create(self, dpname, shouldUpcall=False, versionStr=None):
+ msg = OvsDatapath.dp_cmd_msg()
+ msg["cmd"] = OVS_DP_CMD_NEW
+ if versionStr is None:
+ msg["version"] = OVS_DATAPATH_VERSION
+ else:
+ msg["version"] = int(versionStr.split(":")[0], 0)
+ msg["reserved"] = 0
+ msg["dpifindex"] = 0
+ msg["attrs"].append(["OVS_DP_ATTR_NAME", dpname])
+
+ dpfeatures = 0
+ if versionStr is not None and versionStr.find(":") != -1:
+ dpfeatures = int(versionStr.split(":")[1], 0)
+ else:
+ dpfeatures = OvsDatapath.OVS_DP_F_VPORT_PIDS
+
+ msg["attrs"].append(["OVS_DP_ATTR_USER_FEATURES", dpfeatures])
+ if not shouldUpcall:
+ msg["attrs"].append(["OVS_DP_ATTR_UPCALL_PID", 0])
+
+ try:
+ reply = self.nlm_request(
+ msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK
+ )
+ reply = reply[0]
+ except NetlinkError as ne:
+ if ne.code == errno.EEXIST:
+ reply = None
+ else:
+ raise ne
+
+ return reply
+
+ def destroy(self, dpname):
+ msg = OvsDatapath.dp_cmd_msg()
+ msg["cmd"] = OVS_DP_CMD_DEL
+ msg["version"] = OVS_DATAPATH_VERSION
+ msg["reserved"] = 0
+ msg["dpifindex"] = 0
+ msg["attrs"].append(["OVS_DP_ATTR_NAME", dpname])
+
+ try:
+ reply = self.nlm_request(
+ msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK
+ )
+ reply = reply[0]
+ except NetlinkError as ne:
+ if ne.code == errno.ENODEV:
+ reply = None
+ else:
+ raise ne
+
+ return reply
+
+
+class OvsVport(GenericNetlinkSocket):
+ class ovs_vport_msg(ovs_dp_msg):
+ nla_map = (
+ ("OVS_VPORT_ATTR_UNSPEC", "none"),
+ ("OVS_VPORT_ATTR_PORT_NO", "uint32"),
+ ("OVS_VPORT_ATTR_TYPE", "uint32"),
+ ("OVS_VPORT_ATTR_NAME", "asciiz"),
+ ("OVS_VPORT_ATTR_OPTIONS", "none"),
+ ("OVS_VPORT_ATTR_UPCALL_PID", "array(uint32)"),
+ ("OVS_VPORT_ATTR_STATS", "vportstats"),
+ ("OVS_VPORT_ATTR_PAD", "none"),
+ ("OVS_VPORT_ATTR_IFINDEX", "uint32"),
+ ("OVS_VPORT_ATTR_NETNSID", "uint32"),
+ )
+
+ class vportstats(nla):
+ fields = (
+ ("rx_packets", "=Q"),
+ ("tx_packets", "=Q"),
+ ("rx_bytes", "=Q"),
+ ("tx_bytes", "=Q"),
+ ("rx_errors", "=Q"),
+ ("tx_errors", "=Q"),
+ ("rx_dropped", "=Q"),
+ ("tx_dropped", "=Q"),
+ )
+
+ def type_to_str(vport_type):
+ if vport_type == 1:
+ return "netdev"
+ elif vport_type == 2:
+ return "internal"
+ elif vport_type == 3:
+ return "gre"
+ elif vport_type == 4:
+ return "vxlan"
+ elif vport_type == 5:
+ return "geneve"
+ return "unknown:%d" % vport_type
+
+ def __init__(self):
+ GenericNetlinkSocket.__init__(self)
+ self.bind(OVS_VPORT_FAMILY, OvsVport.ovs_vport_msg)
+
+ def info(self, vport_name, dpifindex=0, portno=None):
+ msg = OvsVport.ovs_vport_msg()
+
+ msg["cmd"] = OVS_VPORT_CMD_GET
+ msg["version"] = OVS_DATAPATH_VERSION
+ msg["reserved"] = 0
+ msg["dpifindex"] = dpifindex
+
+ if portno is None:
+ msg["attrs"].append(["OVS_VPORT_ATTR_NAME", vport_name])
+ else:
+ msg["attrs"].append(["OVS_VPORT_ATTR_PORT_NO", portno])
+
+ try:
+ reply = self.nlm_request(
+ msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST
+ )
+ reply = reply[0]
+ except NetlinkError as ne:
+ if ne.code == errno.ENODEV:
+ reply = None
+ else:
+ raise ne
+ return reply
+
+
+def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=NDB()):
+ dp_name = dp_lookup_rep.get_attr("OVS_DP_ATTR_NAME")
+ base_stats = dp_lookup_rep.get_attr("OVS_DP_ATTR_STATS")
+ megaflow_stats = dp_lookup_rep.get_attr("OVS_DP_ATTR_MEGAFLOW_STATS")
+ user_features = dp_lookup_rep.get_attr("OVS_DP_ATTR_USER_FEATURES")
+ masks_cache_size = dp_lookup_rep.get_attr("OVS_DP_ATTR_MASKS_CACHE_SIZE")
+
+ print("%s:" % dp_name)
+ print(
+ " lookups: hit:%d missed:%d lost:%d"
+ % (base_stats["hit"], base_stats["missed"], base_stats["lost"])
+ )
+ print(" flows:%d" % base_stats["flows"])
+ pkts = base_stats["hit"] + base_stats["missed"]
+ avg = (megaflow_stats["mask_hit"] / pkts) if pkts != 0 else 0.0
+ print(
+ " masks: hit:%d total:%d hit/pkt:%f"
+ % (megaflow_stats["mask_hit"], megaflow_stats["masks"], avg)
+ )
+ print(" caches:")
+ print(" masks-cache: size:%d" % masks_cache_size)
+
+ if user_features is not None:
+ print(" features: 0x%X" % user_features)
+
+ # port print out
+ vpl = OvsVport()
+ for iface in ndb.interfaces:
+ rep = vpl.info(iface.ifname, ifindex)
+ if rep is not None:
+ print(
+ " port %d: %s (%s)"
+ % (
+ rep.get_attr("OVS_VPORT_ATTR_PORT_NO"),
+ rep.get_attr("OVS_VPORT_ATTR_NAME"),
+ OvsVport.type_to_str(rep.get_attr("OVS_VPORT_ATTR_TYPE")),
+ )
+ )
+
+
+def main(argv):
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "-v",
+ "--verbose",
+ action="count",
+ help="Increment 'verbose' output counter.",
+ )
+ subparsers = parser.add_subparsers()
+
+ showdpcmd = subparsers.add_parser("show")
+ showdpcmd.add_argument(
+ "showdp", metavar="N", type=str, nargs="?", help="Datapath Name"
+ )
+
+ adddpcmd = subparsers.add_parser("add-dp")
+ adddpcmd.add_argument("adddp", help="Datapath Name")
+ adddpcmd.add_argument(
+ "-u",
+ "--upcall",
+ action="store_true",
+ help="Leave open a reader for upcalls",
+ )
+ adddpcmd.add_argument(
+ "-V",
+ "--versioning",
+ required=False,
+ help="Specify a custom version / feature string",
+ )
+
+ deldpcmd = subparsers.add_parser("del-dp")
+ deldpcmd.add_argument("deldp", help="Datapath Name")
+
+ args = parser.parse_args()
+
+ ovsdp = OvsDatapath()
+ ndb = NDB()
+
+ if hasattr(args, "showdp"):
+ found = False
+ for iface in ndb.interfaces:
+ rep = None
+ if args.showdp is None:
+ rep = ovsdp.info(iface.ifname, 0)
+ elif args.showdp == iface.ifname:
+ rep = ovsdp.info(iface.ifname, 0)
+
+ if rep is not None:
+ found = True
+ print_ovsdp_full(rep, iface.index, ndb)
+
+ if not found:
+ msg = "No DP found"
+ if args.showdp is not None:
+ msg += ":'%s'" % args.showdp
+ print(msg)
+ elif hasattr(args, "adddp"):
+ rep = ovsdp.create(args.adddp, args.upcall, args.versioning)
+ if rep is None:
+ print("DP '%s' already exists" % args.adddp)
+ else:
+ print("DP '%s' added" % args.adddp)
+ elif hasattr(args, "deldp"):
+ ovsdp.destroy(args.deldp)
+
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv))
diff --git a/tools/testing/selftests/pidfd/Makefile b/tools/testing/selftests/pidfd/Makefile
index f4a2f28f926b..778b6cdc8aed 100644
--- a/tools/testing/selftests/pidfd/Makefile
+++ b/tools/testing/selftests/pidfd/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-CFLAGS += -g -I../../../../usr/include/ -pthread
+CFLAGS += -g -I../../../../usr/include/ -pthread -Wall
TEST_GEN_PROGS := pidfd_test pidfd_fdinfo_test pidfd_open_test \
pidfd_poll_test pidfd_wait pidfd_getfd_test pidfd_setns_test
diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
index 9a2d64901d59..e2dd4ed84984 100644
--- a/tools/testing/selftests/pidfd/pidfd_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_test.c
@@ -413,7 +413,7 @@ static void poll_pidfd(const char *test_name, int pidfd)
c = epoll_wait(epoll_fd, events, MAX_EVENTS, 5000);
if (c != 1 || !(events[0].events & EPOLLIN))
- ksft_exit_fail_msg("%s test: Unexpected epoll_wait result (c=%d, events=%x) ",
+ ksft_exit_fail_msg("%s test: Unexpected epoll_wait result (c=%d, events=%x) "
"(errno %d)\n",
test_name, c, events[0].events, errno);
@@ -435,6 +435,8 @@ static int child_poll_exec_test(void *args)
*/
while (1)
sleep(1);
+
+ return 0;
}
static void test_pidfd_poll_exec(int use_waitpid)
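The removed comma above is the whole bug: with it, "(errno %d)\n" became the first variadic argument rather than part of the format string, so %s printed that literal and every subsequent argument shifted by one. Without the comma, C's adjacent string-literal concatenation fuses the two pieces into a single format string at compile time; a minimal illustration:

#include <stdio.h>

int main(void)
{
	/* Adjacent literals fuse into one format string: "c=%d (errno %d)\n" */
	printf("c=%d "
	       "(errno %d)\n", 1, 2);	/* prints: c=1 (errno 2) */
	return 0;
}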
diff --git a/tools/testing/selftests/pidfd/pidfd_wait.c b/tools/testing/selftests/pidfd/pidfd_wait.c
index 070c1c876df1..0dcb8365ddc3 100644
--- a/tools/testing/selftests/pidfd/pidfd_wait.c
+++ b/tools/testing/selftests/pidfd/pidfd_wait.c
@@ -95,20 +95,28 @@ TEST(wait_states)
.flags = CLONE_PIDFD | CLONE_PARENT_SETTID,
.exit_signal = SIGCHLD,
};
+ int pfd[2];
pid_t pid;
siginfo_t info = {
.si_signo = 0,
};
+ ASSERT_EQ(pipe(pfd), 0);
pid = sys_clone3(&args);
ASSERT_GE(pid, 0);
if (pid == 0) {
+ char buf[2];
+
+ close(pfd[1]);
kill(getpid(), SIGSTOP);
+ ASSERT_EQ(read(pfd[0], buf, 1), 1);
+ close(pfd[0]);
kill(getpid(), SIGSTOP);
exit(EXIT_SUCCESS);
}
+ close(pfd[0]);
ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WSTOPPED, NULL), 0);
ASSERT_EQ(info.si_signo, SIGCHLD);
ASSERT_EQ(info.si_code, CLD_STOPPED);
@@ -117,6 +125,8 @@ TEST(wait_states)
ASSERT_EQ(sys_pidfd_send_signal(pidfd, SIGCONT, NULL, 0), 0);
ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WCONTINUED, NULL), 0);
+ ASSERT_EQ(write(pfd[1], "C", 1), 1);
+ close(pfd[1]);
ASSERT_EQ(info.si_signo, SIGCHLD);
ASSERT_EQ(info.si_code, CLD_CONTINUED);
ASSERT_EQ(info.si_pid, parent_tid);
@@ -138,7 +148,7 @@ TEST(wait_states)
TEST(wait_nonblock)
{
- int pidfd, status = 0;
+ int pidfd;
unsigned int flags = 0;
pid_t parent_tid = -1;
struct clone_args args = {