Diffstat (limited to 'tools')
58 files changed, 2043 insertions, 729 deletions
diff --git a/tools/arch/loongarch/include/uapi/asm/bitsperlong.h b/tools/arch/loongarch/include/uapi/asm/bitsperlong.h
new file mode 100644
index 000000000000..d4e32b3d4843
--- /dev/null
+++ b/tools/arch/loongarch/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_LOONGARCH_BITSPERLONG_H
+#define __ASM_LOONGARCH_BITSPERLONG_H
+
+#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_LOONGARCH_BITSPERLONG_H */
diff --git a/tools/arch/x86/include/asm/orc_types.h b/tools/arch/x86/include/asm/orc_types.h
index 5a2baf28a1dc..1343a62106de 100644
--- a/tools/arch/x86/include/asm/orc_types.h
+++ b/tools/arch/x86/include/asm/orc_types.h
@@ -57,12 +57,14 @@ struct orc_entry {
 	unsigned sp_reg:4;
 	unsigned bp_reg:4;
 	unsigned type:2;
+	unsigned signal:1;
 	unsigned end:1;
 #elif defined(__BIG_ENDIAN_BITFIELD)
 	unsigned bp_reg:4;
 	unsigned sp_reg:4;
-	unsigned unused:5;
+	unsigned unused:4;
 	unsigned end:1;
+	unsigned signal:1;
 	unsigned type:2;
 #endif
 } __packed;
diff --git a/tools/include/linux/objtool.h b/tools/include/linux/objtool.h
index 62c54ffbeeaa..9ac3df3fccf0 100644
--- a/tools/include/linux/objtool.h
+++ b/tools/include/linux/objtool.h
@@ -15,6 +15,7 @@ struct unwind_hint {
 	s16 sp_offset;
 	u8 sp_reg;
 	u8 type;
+	u8 signal;
 	u8 end;
 };
 #endif
@@ -49,7 +50,7 @@ struct unwind_hint {
 
 #ifndef __ASSEMBLY__
 
-#define UNWIND_HINT(sp_reg, sp_offset, type, end) \
+#define UNWIND_HINT(sp_reg, sp_offset, type, signal, end) \
 	"987: \n\t" \
 	".pushsection .discard.unwind_hints\n\t" \
 	/* struct unwind_hint */ \
@@ -57,6 +58,7 @@ struct unwind_hint {
 	".short " __stringify(sp_offset) "\n\t" \
 	".byte " __stringify(sp_reg) "\n\t" \
 	".byte " __stringify(type) "\n\t" \
+	".byte " __stringify(signal) "\n\t" \
 	".byte " __stringify(end) "\n\t" \
 	".balign 4 \n\t" \
 	".popsection\n\t"
@@ -129,7 +131,7 @@ struct unwind_hint {
  * the debuginfo as necessary. It will also warn if it sees any
  * inconsistencies.
  */
-.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0
+.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 end=0
 .Lunwind_hint_ip_\@:
 	.pushsection .discard.unwind_hints
 	/* struct unwind_hint */
@@ -137,6 +139,7 @@ struct unwind_hint {
 	.short \sp_offset
 	.byte \sp_reg
 	.byte \type
+	.byte \signal
 	.byte \end
 	.balign 4
 	.popsection
@@ -174,7 +177,7 @@ struct unwind_hint {
 
 #ifndef __ASSEMBLY__
 
-#define UNWIND_HINT(sp_reg, sp_offset, type, end) \
+#define UNWIND_HINT(sp_reg, sp_offset, type, signal, end) \
 	"\n\t"
 #define STACK_FRAME_NON_STANDARD(func)
 #define STACK_FRAME_NON_STANDARD_FP(func)
@@ -182,7 +185,7 @@ struct unwind_hint {
 #define ASM_REACHABLE
 #else
 #define ANNOTATE_INTRA_FUNCTION_CALL
-.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0
+.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 end=0
 .endm
 .macro STACK_FRAME_NON_STANDARD func:req
 .endm
diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h
index 9ee459872600..588391447bfb 100644
--- a/tools/include/uapi/linux/netdev.h
+++ b/tools/include/uapi/linux/netdev.h
@@ -19,7 +19,7 @@
  * @NETDEV_XDP_ACT_XSK_ZEROCOPY: This feature informs if netdev supports AF_XDP
  *   in zero copy mode.
  * @NETDEV_XDP_ACT_HW_OFFLOAD: This feature informs if netdev supports XDP hw
- *   oflloading.
+ *   offloading.
  * @NETDEV_XDP_ACT_RX_SG: This feature informs if netdev implements non-linear
  *   XDP buffer support in the driver napi callback.
  * @NETDEV_XDP_ACT_NDO_XMIT_SG: This feature informs if netdev implements
diff --git a/tools/net/ynl/lib/.gitignore b/tools/net/ynl/lib/.gitignore
new file mode 100644
index 000000000000..c18dd8d83cee
--- /dev/null
+++ b/tools/net/ynl/lib/.gitignore
@@ -0,0 +1 @@
+__pycache__/
diff --git a/tools/net/ynl/lib/nlspec.py b/tools/net/ynl/lib/nlspec.py
index e204679ad8b7..71da568e2c28 100644
--- a/tools/net/ynl/lib/nlspec.py
+++ b/tools/net/ynl/lib/nlspec.py
@@ -3,7 +3,6 @@
 import collections
 import importlib
 import os
-import traceback
 
 import yaml
 
@@ -234,8 +233,7 @@ class SpecFamily(SpecElement):
                 resolved.append(elem)
 
             if len(resolved) == 0:
-                traceback.print_exception(last_exception)
-                raise Exception("Could not resolve any spec element, infinite loop?")
+                raise last_exception
 
     def new_attr_set(self, elem):
         return SpecAttrSet(self, elem)
diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
index 3942f24b9163..274e9c566f61 100755
--- a/tools/net/ynl/ynl-gen-c.py
+++ b/tools/net/ynl/ynl-gen-c.py
@@ -546,7 +546,7 @@ class Struct:
         max_val = 0
         self.attr_max_val = None
         for name, attr in self.attr_list:
-            if attr.value > max_val:
+            if attr.value >= max_val:
                 max_val = attr.value
                 self.attr_max_val = attr
             self.attrs[name] = attr
diff --git a/tools/objtool/.gitignore b/tools/objtool/.gitignore
index 14236db3677f..4faa4dd72f35 100644
--- a/tools/objtool/.gitignore
+++ b/tools/objtool/.gitignore
@@ -2,3 +2,4 @@
 arch/x86/lib/inat-tables.c
 /objtool
 fixdep
+libsubcmd/
diff --git a/tools/objtool/Build b/tools/objtool/Build
index 33f2ee5a46d3..a3cdf8af6635 100644
--- a/tools/objtool/Build
+++ b/tools/objtool/Build
@@ -16,8 +16,6 @@ objtool-y += libctype.o
 objtool-y += str_error_r.o
 objtool-y += librbtree.o
 
-CFLAGS += -I$(srctree)/tools/lib
-
 $(OUTPUT)libstring.o: ../lib/string.c FORCE
 	$(call rule_mkdir)
 	$(call if_changed_dep,cc_o_c)
diff --git a/tools/objtool/Documentation/objtool.txt b/tools/objtool/Documentation/objtool.txt
index 8a671902a187..8e53fc6735ef 100644
--- a/tools/objtool/Documentation/objtool.txt
+++ b/tools/objtool/Documentation/objtool.txt
@@ -410,6 +410,14 @@ the objtool maintainers.
     can remove this warning by putting the ANNOTATE_INTRA_FUNCTION_CALL
     directive right before the call.
 
+12. file.o: warning: func(): not an indirect call target
+
+    This means that objtool is running with --ibt and a function expected
+    to be an indirect call target is not. In particular, this happens for
+    init_module() or cleanup_module() if a module relies on these special
+    names and does not use module_init() / module_exit() macros to create
+    them.
+
 If the error doesn't seem to make sense, it could be a bug in objtool.
 
 Feel free to ask the objtool maintainer for help.
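(Warning 12 above points modules at the module_init()/module_exit() pattern.
A minimal sketch of that pattern follows; it is illustrative only and not part
of this patch -- the module itself and the demo_init/demo_exit names are
hypothetical. The macros alias the given functions to the special
init_module()/cleanup_module() entry points and mark them address-taken,
which is what --ibt validation expects of an indirect call target.)

    #include <linux/init.h>
    #include <linux/module.h>

    static int __init demo_init(void)
    {
    	pr_info("demo loaded\n");
    	return 0;
    }

    static void __exit demo_exit(void)
    {
    	pr_info("demo unloaded\n");
    }

    /*
     * module_init()/module_exit() create the special init_module() and
     * cleanup_module() entry points from these functions. Hand-writing
     * int init_module(void) instead is what triggers warning 12 under
     * objtool --ibt.
     */
    module_init(demo_init);
    module_exit(demo_exit);

    MODULE_LICENSE("GPL");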
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index a3a9cc24e0e3..83b100c1e7f6 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -2,19 +2,18 @@ include ../scripts/Makefile.include include ../scripts/Makefile.arch -# always use the host compiler -AR = $(HOSTAR) -CC = $(HOSTCC) -LD = $(HOSTLD) - ifeq ($(srctree),) srctree := $(patsubst %/,%,$(dir $(CURDIR))) srctree := $(patsubst %/,%,$(dir $(srctree))) endif -SUBCMD_SRCDIR = $(srctree)/tools/lib/subcmd/ -LIBSUBCMD_OUTPUT = $(or $(OUTPUT),$(CURDIR)/) -LIBSUBCMD = $(LIBSUBCMD_OUTPUT)libsubcmd.a +LIBSUBCMD_DIR = $(srctree)/tools/lib/subcmd/ +ifneq ($(OUTPUT),) + LIBSUBCMD_OUTPUT = $(abspath $(OUTPUT))/libsubcmd +else + LIBSUBCMD_OUTPUT = $(CURDIR)/libsubcmd +endif +LIBSUBCMD = $(LIBSUBCMD_OUTPUT)/libsubcmd.a OBJTOOL := $(OUTPUT)objtool OBJTOOL_IN := $(OBJTOOL)-in.o @@ -28,16 +27,29 @@ INCLUDES := -I$(srctree)/tools/include \ -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ -I$(srctree)/tools/arch/$(SRCARCH)/include \ -I$(srctree)/tools/objtool/include \ - -I$(srctree)/tools/objtool/arch/$(SRCARCH)/include + -I$(srctree)/tools/objtool/arch/$(SRCARCH)/include \ + -I$(LIBSUBCMD_OUTPUT)/include +# Note, EXTRA_WARNINGS here was determined for CC and not HOSTCC, it +# is passed here to match a legacy behavior. WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed -Wno-nested-externs -CFLAGS := -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS) -LDFLAGS += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS) +OBJTOOL_CFLAGS := -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS) +OBJTOOL_LDFLAGS := $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS) # Allow old libelf to be used: -elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr) -CFLAGS += $(if $(elfshdr),,-DLIBELF_USE_DEPRECATED) +elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(HOSTCC) $(OBJTOOL_CFLAGS) -x c -E - | grep elf_getshdr) +OBJTOOL_CFLAGS += $(if $(elfshdr),,-DLIBELF_USE_DEPRECATED) + +# Always want host compilation. 
+HOST_OVERRIDES := CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)" AWK = awk +MKDIR = mkdir + +ifeq ($(V),1) + Q = +else + Q = @ +endif BUILD_ORC := n @@ -49,21 +61,33 @@ export BUILD_ORC export srctree OUTPUT CFLAGS SRCARCH AWK include $(srctree)/tools/build/Makefile.include -$(OBJTOOL_IN): fixdep FORCE - @$(CONFIG_SHELL) ./sync-check.sh - @$(MAKE) $(build)=objtool +$(OBJTOOL_IN): fixdep $(LIBSUBCMD) FORCE + $(Q)$(CONFIG_SHELL) ./sync-check.sh + $(Q)$(MAKE) $(build)=objtool $(HOST_OVERRIDES) CFLAGS="$(OBJTOOL_CFLAGS)" \ + LDFLAGS="$(OBJTOOL_LDFLAGS)" + $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN) - $(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@ + $(QUIET_LINK)$(HOSTCC) $(OBJTOOL_IN) $(OBJTOOL_LDFLAGS) -o $@ + + +$(LIBSUBCMD_OUTPUT): + $(Q)$(MKDIR) -p $@ +$(LIBSUBCMD): fixdep $(LIBSUBCMD_OUTPUT) FORCE + $(Q)$(MAKE) -C $(LIBSUBCMD_DIR) O=$(LIBSUBCMD_OUTPUT) \ + DESTDIR=$(LIBSUBCMD_OUTPUT) prefix= subdir= \ + $(HOST_OVERRIDES) EXTRA_CFLAGS="$(OBJTOOL_CFLAGS)" \ + $@ install_headers -$(LIBSUBCMD): fixdep FORCE - $(Q)$(MAKE) -C $(SUBCMD_SRCDIR) OUTPUT=$(LIBSUBCMD_OUTPUT) +$(LIBSUBCMD)-clean: + $(call QUIET_CLEAN, libsubcmd) + $(Q)$(RM) -r -- $(LIBSUBCMD_OUTPUT) -clean: +clean: $(LIBSUBCMD)-clean $(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL) $(Q)find $(OUTPUT) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete - $(Q)$(RM) $(OUTPUT)arch/x86/lib/inat-tables.c $(OUTPUT)fixdep $(LIBSUBCMD) + $(Q)$(RM) $(OUTPUT)arch/x86/lib/inat-tables.c $(OUTPUT)fixdep FORCE: diff --git a/tools/objtool/arch/powerpc/decode.c b/tools/objtool/arch/powerpc/decode.c index 9c653805a08a..53b55690f320 100644 --- a/tools/objtool/arch/powerpc/decode.c +++ b/tools/objtool/arch/powerpc/decode.c @@ -41,38 +41,36 @@ const char *arch_ret_insn(int len) int arch_decode_instruction(struct objtool_file *file, const struct section *sec, unsigned long offset, unsigned int maxlen, - unsigned int *len, enum insn_type *type, - unsigned long *immediate, - struct list_head *ops_list) + struct instruction *insn) { unsigned int opcode; enum insn_type typ; unsigned long imm; - u32 insn; + u32 ins; - insn = bswap_if_needed(file->elf, *(u32 *)(sec->data->d_buf + offset)); - opcode = insn >> 26; + ins = bswap_if_needed(file->elf, *(u32 *)(sec->data->d_buf + offset)); + opcode = ins >> 26; typ = INSN_OTHER; imm = 0; switch (opcode) { case 18: /* b[l][a] */ - if ((insn & 3) == 1) /* bl */ + if ((ins & 3) == 1) /* bl */ typ = INSN_CALL; - imm = insn & 0x3fffffc; + imm = ins & 0x3fffffc; if (imm & 0x2000000) imm -= 0x4000000; break; } if (opcode == 1) - *len = 8; + insn->len = 8; else - *len = 4; + insn->len = 4; - *type = typ; - *immediate = imm; + insn->type = typ; + insn->immediate = imm; return 0; } diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index e7b030f7e2a5..9ef024fd648c 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -105,7 +105,7 @@ bool arch_pc_relative_reloc(struct reloc *reloc) #define ADD_OP(op) \ if (!(op = calloc(1, sizeof(*op)))) \ return -1; \ - else for (list_add_tail(&op->list, ops_list); op; op = NULL) + else for (*ops_list = op, ops_list = &op->next; op; op = NULL) /* * Helpers to decode ModRM/SIB: @@ -146,12 +146,11 @@ static bool has_notrack_prefix(struct insn *insn) int arch_decode_instruction(struct objtool_file *file, const struct section *sec, unsigned long offset, unsigned int maxlen, - unsigned int *len, enum insn_type *type, - unsigned long *immediate, - struct list_head *ops_list) + struct instruction *insn) { + struct stack_op 
**ops_list = &insn->stack_ops; const struct elf *elf = file->elf; - struct insn insn; + struct insn ins; int x86_64, ret; unsigned char op1, op2, op3, prefix, rex = 0, rex_b = 0, rex_r = 0, rex_w = 0, rex_x = 0, @@ -165,42 +164,42 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec if (x86_64 == -1) return -1; - ret = insn_decode(&insn, sec->data->d_buf + offset, maxlen, + ret = insn_decode(&ins, sec->data->d_buf + offset, maxlen, x86_64 ? INSN_MODE_64 : INSN_MODE_32); if (ret < 0) { WARN("can't decode instruction at %s:0x%lx", sec->name, offset); return -1; } - *len = insn.length; - *type = INSN_OTHER; + insn->len = ins.length; + insn->type = INSN_OTHER; - if (insn.vex_prefix.nbytes) + if (ins.vex_prefix.nbytes) return 0; - prefix = insn.prefixes.bytes[0]; + prefix = ins.prefixes.bytes[0]; - op1 = insn.opcode.bytes[0]; - op2 = insn.opcode.bytes[1]; - op3 = insn.opcode.bytes[2]; + op1 = ins.opcode.bytes[0]; + op2 = ins.opcode.bytes[1]; + op3 = ins.opcode.bytes[2]; - if (insn.rex_prefix.nbytes) { - rex = insn.rex_prefix.bytes[0]; + if (ins.rex_prefix.nbytes) { + rex = ins.rex_prefix.bytes[0]; rex_w = X86_REX_W(rex) >> 3; rex_r = X86_REX_R(rex) >> 2; rex_x = X86_REX_X(rex) >> 1; rex_b = X86_REX_B(rex); } - if (insn.modrm.nbytes) { - modrm = insn.modrm.bytes[0]; + if (ins.modrm.nbytes) { + modrm = ins.modrm.bytes[0]; modrm_mod = X86_MODRM_MOD(modrm); modrm_reg = X86_MODRM_REG(modrm) + 8*rex_r; modrm_rm = X86_MODRM_RM(modrm) + 8*rex_b; } - if (insn.sib.nbytes) { - sib = insn.sib.bytes[0]; + if (ins.sib.nbytes) { + sib = ins.sib.bytes[0]; /* sib_scale = X86_SIB_SCALE(sib); */ sib_index = X86_SIB_INDEX(sib) + 8*rex_x; sib_base = X86_SIB_BASE(sib) + 8*rex_b; @@ -254,7 +253,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec break; case 0x70 ... 0x7f: - *type = INSN_JUMP_CONDITIONAL; + insn->type = INSN_JUMP_CONDITIONAL; break; case 0x80 ... 
0x83: @@ -278,7 +277,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec if (!rm_is_reg(CFI_SP)) break; - imm = insn.immediate.value; + imm = ins.immediate.value; if (op1 & 2) { /* sign extend */ if (op1 & 1) { /* imm32 */ imm <<= 32; @@ -309,7 +308,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec ADD_OP(op) { op->src.type = OP_SRC_AND; op->src.reg = CFI_SP; - op->src.offset = insn.immediate.value; + op->src.offset = ins.immediate.value; op->dest.type = OP_DEST_REG; op->dest.reg = CFI_SP; } @@ -356,7 +355,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec op->src.reg = CFI_SP; op->dest.type = OP_DEST_REG_INDIRECT; op->dest.reg = modrm_rm; - op->dest.offset = insn.displacement.value; + op->dest.offset = ins.displacement.value; } break; } @@ -389,7 +388,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec op->src.reg = modrm_reg; op->dest.type = OP_DEST_REG_INDIRECT; op->dest.reg = CFI_BP; - op->dest.offset = insn.displacement.value; + op->dest.offset = ins.displacement.value; } break; } @@ -402,7 +401,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec op->src.reg = modrm_reg; op->dest.type = OP_DEST_REG_INDIRECT; op->dest.reg = CFI_SP; - op->dest.offset = insn.displacement.value; + op->dest.offset = ins.displacement.value; } break; } @@ -419,7 +418,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec ADD_OP(op) { op->src.type = OP_SRC_REG_INDIRECT; op->src.reg = CFI_BP; - op->src.offset = insn.displacement.value; + op->src.offset = ins.displacement.value; op->dest.type = OP_DEST_REG; op->dest.reg = modrm_reg; } @@ -432,7 +431,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec ADD_OP(op) { op->src.type = OP_SRC_REG_INDIRECT; op->src.reg = CFI_SP; - op->src.offset = insn.displacement.value; + op->src.offset = ins.displacement.value; op->dest.type = OP_DEST_REG; op->dest.reg = modrm_reg; } @@ -464,7 +463,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec /* lea disp(%src), %dst */ ADD_OP(op) { - op->src.offset = insn.displacement.value; + op->src.offset = ins.displacement.value; if (!op->src.offset) { /* lea (%src), %dst */ op->src.type = OP_SRC_REG; @@ -487,7 +486,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec break; case 0x90: - *type = INSN_NOP; + insn->type = INSN_NOP; break; case 0x9c: @@ -511,39 +510,39 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec if (op2 == 0x01) { if (modrm == 0xca) - *type = INSN_CLAC; + insn->type = INSN_CLAC; else if (modrm == 0xcb) - *type = INSN_STAC; + insn->type = INSN_STAC; } else if (op2 >= 0x80 && op2 <= 0x8f) { - *type = INSN_JUMP_CONDITIONAL; + insn->type = INSN_JUMP_CONDITIONAL; } else if (op2 == 0x05 || op2 == 0x07 || op2 == 0x34 || op2 == 0x35) { /* sysenter, sysret */ - *type = INSN_CONTEXT_SWITCH; + insn->type = INSN_CONTEXT_SWITCH; } else if (op2 == 0x0b || op2 == 0xb9) { /* ud2 */ - *type = INSN_BUG; + insn->type = INSN_BUG; } else if (op2 == 0x0d || op2 == 0x1f) { /* nopl/nopw */ - *type = INSN_NOP; + insn->type = INSN_NOP; } else if (op2 == 0x1e) { if (prefix == 0xf3 && (modrm == 0xfa || modrm == 0xfb)) - *type = INSN_ENDBR; + insn->type = INSN_ENDBR; } else if (op2 == 0x38 && op3 == 0xf8) { - if (insn.prefixes.nbytes == 1 && - insn.prefixes.bytes[0] == 0xf2) { + if (ins.prefixes.nbytes == 1 && + 
ins.prefixes.bytes[0] == 0xf2) { /* ENQCMD cannot be used in the kernel. */ WARN("ENQCMD instruction at %s:%lx", sec->name, offset); @@ -591,29 +590,29 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec case 0xcc: /* int3 */ - *type = INSN_TRAP; + insn->type = INSN_TRAP; break; case 0xe3: /* jecxz/jrcxz */ - *type = INSN_JUMP_CONDITIONAL; + insn->type = INSN_JUMP_CONDITIONAL; break; case 0xe9: case 0xeb: - *type = INSN_JUMP_UNCONDITIONAL; + insn->type = INSN_JUMP_UNCONDITIONAL; break; case 0xc2: case 0xc3: - *type = INSN_RETURN; + insn->type = INSN_RETURN; break; case 0xc7: /* mov imm, r/m */ if (!opts.noinstr) break; - if (insn.length == 3+4+4 && !strncmp(sec->name, ".init.text", 10)) { + if (ins.length == 3+4+4 && !strncmp(sec->name, ".init.text", 10)) { struct reloc *immr, *disp; struct symbol *func; int idx; @@ -661,17 +660,17 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec case 0xca: /* retf */ case 0xcb: /* retf */ - *type = INSN_CONTEXT_SWITCH; + insn->type = INSN_CONTEXT_SWITCH; break; case 0xe0: /* loopne */ case 0xe1: /* loope */ case 0xe2: /* loop */ - *type = INSN_JUMP_CONDITIONAL; + insn->type = INSN_JUMP_CONDITIONAL; break; case 0xe8: - *type = INSN_CALL; + insn->type = INSN_CALL; /* * For the impact on the stack, a CALL behaves like * a PUSH of an immediate value (the return address). @@ -683,30 +682,30 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec break; case 0xfc: - *type = INSN_CLD; + insn->type = INSN_CLD; break; case 0xfd: - *type = INSN_STD; + insn->type = INSN_STD; break; case 0xff: if (modrm_reg == 2 || modrm_reg == 3) { - *type = INSN_CALL_DYNAMIC; - if (has_notrack_prefix(&insn)) + insn->type = INSN_CALL_DYNAMIC; + if (has_notrack_prefix(&ins)) WARN("notrack prefix found at %s:0x%lx", sec->name, offset); } else if (modrm_reg == 4) { - *type = INSN_JUMP_DYNAMIC; - if (has_notrack_prefix(&insn)) + insn->type = INSN_JUMP_DYNAMIC; + if (has_notrack_prefix(&ins)) WARN("notrack prefix found at %s:0x%lx", sec->name, offset); } else if (modrm_reg == 5) { /* jmpf */ - *type = INSN_CONTEXT_SWITCH; + insn->type = INSN_CONTEXT_SWITCH; } else if (modrm_reg == 6) { @@ -723,7 +722,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec break; } - *immediate = insn.immediate.nbytes ? insn.immediate.value : 0; + insn->immediate = ins.immediate.nbytes ? ins.immediate.value : 0; return 0; } diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c index a4f39407bf59..7c175198d09f 100644 --- a/tools/objtool/builtin-check.c +++ b/tools/objtool/builtin-check.c @@ -65,7 +65,7 @@ static int parse_hacks(const struct option *opt, const char *str, int unset) return found ? 
0 : -1; } -const struct option check_options[] = { +static const struct option check_options[] = { OPT_GROUP("Actions:"), OPT_CALLBACK_OPTARG('h', "hacks", NULL, NULL, "jump_label,noinstr,skylake", "patch toolchain bugs/limitations", parse_hacks), OPT_BOOLEAN('i', "ibt", &opts.ibt, "validate and annotate IBT"), diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 35374812afdc..f937be1afe65 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -23,7 +23,7 @@ #include <linux/static_call_types.h> struct alternative { - struct list_head list; + struct alternative *next; struct instruction *insn; bool skip_orig; }; @@ -47,27 +47,29 @@ struct instruction *find_insn(struct objtool_file *file, return NULL; } -static struct instruction *next_insn_same_sec(struct objtool_file *file, - struct instruction *insn) +struct instruction *next_insn_same_sec(struct objtool_file *file, + struct instruction *insn) { - struct instruction *next = list_next_entry(insn, list); + if (insn->idx == INSN_CHUNK_MAX) + return find_insn(file, insn->sec, insn->offset + insn->len); - if (!next || &next->list == &file->insn_list || next->sec != insn->sec) + insn++; + if (!insn->len) return NULL; - return next; + return insn; } static struct instruction *next_insn_same_func(struct objtool_file *file, struct instruction *insn) { - struct instruction *next = list_next_entry(insn, list); + struct instruction *next = next_insn_same_sec(file, insn); struct symbol *func = insn_func(insn); if (!func) return NULL; - if (&next->list != &file->insn_list && insn_func(next) == func) + if (next && insn_func(next) == func) return next; /* Check if we're already in the subfunction: */ @@ -78,17 +80,35 @@ static struct instruction *next_insn_same_func(struct objtool_file *file, return find_insn(file, func->cfunc->sec, func->cfunc->offset); } +static struct instruction *prev_insn_same_sec(struct objtool_file *file, + struct instruction *insn) +{ + if (insn->idx == 0) { + if (insn->prev_len) + return find_insn(file, insn->sec, insn->offset - insn->prev_len); + return NULL; + } + + return insn - 1; +} + static struct instruction *prev_insn_same_sym(struct objtool_file *file, - struct instruction *insn) + struct instruction *insn) { - struct instruction *prev = list_prev_entry(insn, list); + struct instruction *prev = prev_insn_same_sec(file, insn); - if (&prev->list != &file->insn_list && insn_func(prev) == insn_func(insn)) + if (prev && insn_func(prev) == insn_func(insn)) return prev; return NULL; } +#define for_each_insn(file, insn) \ + for (struct section *__sec, *__fake = (struct section *)1; \ + __fake; __fake = NULL) \ + for_each_sec(file, __sec) \ + sec_for_each_insn(file, __sec, insn) + #define func_for_each_insn(file, func, insn) \ for (insn = find_insn(file, func->sec, func->offset); \ insn; \ @@ -96,16 +116,13 @@ static struct instruction *prev_insn_same_sym(struct objtool_file *file, #define sym_for_each_insn(file, sym, insn) \ for (insn = find_insn(file, sym->sec, sym->offset); \ - insn && &insn->list != &file->insn_list && \ - insn->sec == sym->sec && \ - insn->offset < sym->offset + sym->len; \ - insn = list_next_entry(insn, list)) + insn && insn->offset < sym->offset + sym->len; \ + insn = next_insn_same_sec(file, insn)) #define sym_for_each_insn_continue_reverse(file, sym, insn) \ - for (insn = list_prev_entry(insn, list); \ - &insn->list != &file->insn_list && \ - insn->sec == sym->sec && insn->offset >= sym->offset; \ - insn = list_prev_entry(insn, list)) + for (insn = prev_insn_same_sec(file, insn); 
\ + insn && insn->offset >= sym->offset; \ + insn = prev_insn_same_sec(file, insn)) #define sec_for_each_insn_from(file, insn) \ for (; insn; insn = next_insn_same_sec(file, insn)) @@ -114,16 +131,34 @@ static struct instruction *prev_insn_same_sym(struct objtool_file *file, for (insn = next_insn_same_sec(file, insn); insn; \ insn = next_insn_same_sec(file, insn)) +static inline struct symbol *insn_call_dest(struct instruction *insn) +{ + if (insn->type == INSN_JUMP_DYNAMIC || + insn->type == INSN_CALL_DYNAMIC) + return NULL; + + return insn->_call_dest; +} + +static inline struct reloc *insn_jump_table(struct instruction *insn) +{ + if (insn->type == INSN_JUMP_DYNAMIC || + insn->type == INSN_CALL_DYNAMIC) + return insn->_jump_table; + + return NULL; +} + static bool is_jump_table_jump(struct instruction *insn) { struct alt_group *alt_group = insn->alt_group; - if (insn->jump_table) + if (insn_jump_table(insn)) return true; /* Retpoline alternative for a jump table? */ return alt_group && alt_group->orig_group && - alt_group->orig_group->first_insn->jump_table; + insn_jump_table(alt_group->orig_group->first_insn); } static bool is_sibling_call(struct instruction *insn) @@ -137,8 +172,8 @@ static bool is_sibling_call(struct instruction *insn) return !is_jump_table_jump(insn); } - /* add_jump_destinations() sets insn->call_dest for sibling calls. */ - return (is_static_jump(insn) && insn->call_dest); + /* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */ + return (is_static_jump(insn) && insn_call_dest(insn)); } /* @@ -274,8 +309,8 @@ static void init_insn_state(struct objtool_file *file, struct insn_state *state, /* * We need the full vmlinux for noinstr validation, otherwise we can - * not correctly determine insn->call_dest->sec (external symbols do - * not have a section). + * not correctly determine insn_call_dest(insn)->sec (external symbols + * do not have a section). 
*/ if (opts.link && opts.noinstr && sec) state->noinstr = sec->noinstr; @@ -366,6 +401,9 @@ static int decode_instructions(struct objtool_file *file) int ret; for_each_sec(file, sec) { + struct instruction *insns = NULL; + u8 prev_len = 0; + u8 idx = 0; if (!(sec->sh.sh_flags & SHF_EXECINSTR)) continue; @@ -391,26 +429,31 @@ static int decode_instructions(struct objtool_file *file) sec->init = true; for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) { - insn = malloc(sizeof(*insn)); - if (!insn) { - WARN("malloc failed"); - return -1; + if (!insns || idx == INSN_CHUNK_MAX) { + insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE); + if (!insns) { + WARN("malloc failed"); + return -1; + } + idx = 0; + } else { + idx++; } - memset(insn, 0, sizeof(*insn)); - INIT_LIST_HEAD(&insn->alts); - INIT_LIST_HEAD(&insn->stack_ops); - INIT_LIST_HEAD(&insn->call_node); + insn = &insns[idx]; + insn->idx = idx; + INIT_LIST_HEAD(&insn->call_node); insn->sec = sec; insn->offset = offset; + insn->prev_len = prev_len; ret = arch_decode_instruction(file, sec, offset, sec->sh.sh_size - offset, - &insn->len, &insn->type, - &insn->immediate, - &insn->stack_ops); + insn); if (ret) - goto err; + return ret; + + prev_len = insn->len; /* * By default, "ud2" is a dead end unless otherwise @@ -421,10 +464,11 @@ static int decode_instructions(struct objtool_file *file) insn->dead_end = true; hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset)); - list_add_tail(&insn->list, &file->insn_list); nr_insns++; } +// printf("%s: last chunk used: %d\n", sec->name, (int)idx); + list_for_each_entry(func, &sec->symbol_list, list) { if (func->type != STT_NOTYPE && func->type != STT_FUNC) continue; @@ -467,10 +511,6 @@ static int decode_instructions(struct objtool_file *file) printf("nr_insns: %lu\n", nr_insns); return 0; - -err: - free(insn); - return ret; } /* @@ -585,7 +625,7 @@ static int add_dead_ends(struct objtool_file *file) } insn = find_insn(file, reloc->sym->sec, reloc->addend); if (insn) - insn = list_prev_entry(insn, list); + insn = prev_insn_same_sec(file, insn); else if (reloc->addend == reloc->sym->sec->sh.sh_size) { insn = find_last_insn(file, reloc->sym->sec); if (!insn) { @@ -620,7 +660,7 @@ reachable: } insn = find_insn(file, reloc->sym->sec, reloc->addend); if (insn) - insn = list_prev_entry(insn, list); + insn = prev_insn_same_sec(file, insn); else if (reloc->addend == reloc->sym->sec->sh.sh_size) { insn = find_last_insn(file, reloc->sym->sec); if (!insn) { @@ -682,7 +722,7 @@ static int create_static_call_sections(struct objtool_file *file) return -1; /* find key symbol */ - key_name = strdup(insn->call_dest->name); + key_name = strdup(insn_call_dest(insn)->name); if (!key_name) { perror("strdup"); return -1; @@ -690,6 +730,7 @@ static int create_static_call_sections(struct objtool_file *file) if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR, STATIC_CALL_TRAMP_PREFIX_LEN)) { WARN("static_call: trampoline name malformed: %s", key_name); + free(key_name); return -1; } tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN; @@ -699,6 +740,7 @@ static int create_static_call_sections(struct objtool_file *file) if (!key_sym) { if (!opts.module) { WARN("static_call: can't find static_call_key symbol: %s", tmp); + free(key_name); return -1; } @@ -711,7 +753,7 @@ static int create_static_call_sections(struct objtool_file *file) * trampoline address. This is fixed up in * static_call_add_module(). 
*/ - key_sym = insn->call_dest; + key_sym = insn_call_dest(insn); } free(key_name); @@ -856,8 +898,15 @@ static int create_ibt_endbr_seal_sections(struct objtool_file *file) list_for_each_entry(insn, &file->endbr_list, call_node) { int *site = (int *)sec->data->d_buf + idx; + struct symbol *sym = insn->sym; *site = 0; + if (opts.module && sym && sym->type == STT_FUNC && + insn->offset == sym->offset && + (!strcmp(sym->name, "init_module") || + !strcmp(sym->name, "cleanup_module"))) + WARN("%s(): not an indirect call target", sym->name); + if (elf_add_reloc_to_insn(file->elf, sec, idx * sizeof(int), R_X86_64_PC32, @@ -1302,43 +1351,42 @@ __weak bool arch_is_rethunk(struct symbol *sym) return false; } -#define NEGATIVE_RELOC ((void *)-1L) - static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) { - if (insn->reloc == NEGATIVE_RELOC) + struct reloc *reloc; + + if (insn->no_reloc) return NULL; - if (!insn->reloc) { - if (!file) - return NULL; + if (!file) + return NULL; - insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec, - insn->offset, insn->len); - if (!insn->reloc) { - insn->reloc = NEGATIVE_RELOC; - return NULL; - } + reloc = find_reloc_by_dest_range(file->elf, insn->sec, + insn->offset, insn->len); + if (!reloc) { + insn->no_reloc = 1; + return NULL; } - return insn->reloc; + return reloc; } static void remove_insn_ops(struct instruction *insn) { - struct stack_op *op, *tmp; + struct stack_op *op, *next; - list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) { - list_del(&op->list); + for (op = insn->stack_ops; op; op = next) { + next = op->next; free(op); } + insn->stack_ops = NULL; } static void annotate_call_site(struct objtool_file *file, struct instruction *insn, bool sibling) { struct reloc *reloc = insn_reloc(file, insn); - struct symbol *sym = insn->call_dest; + struct symbol *sym = insn_call_dest(insn); if (!sym) sym = reloc->sym; @@ -1423,7 +1471,7 @@ static void annotate_call_site(struct objtool_file *file, static void add_call_dest(struct objtool_file *file, struct instruction *insn, struct symbol *dest, bool sibling) { - insn->call_dest = dest; + insn->_call_dest = dest; if (!dest) return; @@ -1681,12 +1729,12 @@ static int add_call_destinations(struct objtool_file *file) if (insn->ignore) continue; - if (!insn->call_dest) { + if (!insn_call_dest(insn)) { WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset); return -1; } - if (insn_func(insn) && insn->call_dest->type != STT_FUNC) { + if (insn_func(insn) && insn_call_dest(insn)->type != STT_FUNC) { WARN_FUNC("unsupported call to non-function", insn->sec, insn->offset); return -1; @@ -1724,36 +1772,50 @@ static int handle_group_alt(struct objtool_file *file, struct instruction *orig_insn, struct instruction **new_insn) { - struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL; + struct instruction *last_new_insn = NULL, *insn, *nop = NULL; struct alt_group *orig_alt_group, *new_alt_group; unsigned long dest_off; - - orig_alt_group = malloc(sizeof(*orig_alt_group)); + orig_alt_group = orig_insn->alt_group; if (!orig_alt_group) { - WARN("malloc failed"); - return -1; - } - orig_alt_group->cfi = calloc(special_alt->orig_len, - sizeof(struct cfi_state *)); - if (!orig_alt_group->cfi) { - WARN("calloc failed"); - return -1; - } + struct instruction *last_orig_insn = NULL; - last_orig_insn = NULL; - insn = orig_insn; - sec_for_each_insn_from(file, insn) { - if (insn->offset >= special_alt->orig_off + special_alt->orig_len) - break; + orig_alt_group 
= malloc(sizeof(*orig_alt_group)); + if (!orig_alt_group) { + WARN("malloc failed"); + return -1; + } + orig_alt_group->cfi = calloc(special_alt->orig_len, + sizeof(struct cfi_state *)); + if (!orig_alt_group->cfi) { + WARN("calloc failed"); + return -1; + } - insn->alt_group = orig_alt_group; - last_orig_insn = insn; - } - orig_alt_group->orig_group = NULL; - orig_alt_group->first_insn = orig_insn; - orig_alt_group->last_insn = last_orig_insn; + insn = orig_insn; + sec_for_each_insn_from(file, insn) { + if (insn->offset >= special_alt->orig_off + special_alt->orig_len) + break; + insn->alt_group = orig_alt_group; + last_orig_insn = insn; + } + orig_alt_group->orig_group = NULL; + orig_alt_group->first_insn = orig_insn; + orig_alt_group->last_insn = last_orig_insn; + orig_alt_group->nop = NULL; + } else { + if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len - + orig_alt_group->first_insn->offset != special_alt->orig_len) { + WARN_FUNC("weirdly overlapping alternative! %ld != %d", + orig_insn->sec, orig_insn->offset, + orig_alt_group->last_insn->offset + + orig_alt_group->last_insn->len - + orig_alt_group->first_insn->offset, + special_alt->orig_len); + return -1; + } + } new_alt_group = malloc(sizeof(*new_alt_group)); if (!new_alt_group) { @@ -1775,8 +1837,6 @@ static int handle_group_alt(struct objtool_file *file, return -1; } memset(nop, 0, sizeof(*nop)); - INIT_LIST_HEAD(&nop->alts); - INIT_LIST_HEAD(&nop->stack_ops); nop->sec = special_alt->new_sec; nop->offset = special_alt->new_off + special_alt->new_len; @@ -1830,7 +1890,7 @@ static int handle_group_alt(struct objtool_file *file, dest_off = arch_jump_destination(insn); if (dest_off == special_alt->new_off + special_alt->new_len) { - insn->jump_dest = next_insn_same_sec(file, last_orig_insn); + insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn); if (!insn->jump_dest) { WARN_FUNC("can't find alternative jump destination", insn->sec, insn->offset); @@ -1845,12 +1905,11 @@ static int handle_group_alt(struct objtool_file *file, return -1; } - if (nop) - list_add(&nop->list, &last_new_insn->list); end: new_alt_group->orig_group = orig_alt_group; new_alt_group->first_insn = *new_insn; - new_alt_group->last_insn = nop ? 
: last_new_insn; + new_alt_group->last_insn = last_new_insn; + new_alt_group->nop = nop; new_alt_group->cfi = orig_alt_group->cfi; return 0; } @@ -1900,7 +1959,7 @@ static int handle_jump_alt(struct objtool_file *file, else file->jl_long++; - *new_insn = list_next_entry(orig_insn, list); + *new_insn = next_insn_same_sec(file, orig_insn); return 0; } @@ -1974,7 +2033,8 @@ static int add_special_section_alts(struct objtool_file *file) alt->insn = new_insn; alt->skip_orig = special_alt->skip_orig; orig_insn->ignore_alts |= special_alt->skip_alt; - list_add_tail(&alt->list, &orig_insn->alts); + alt->next = orig_insn->alts; + orig_insn->alts = alt; list_del(&special_alt->list); free(special_alt); @@ -2033,7 +2093,8 @@ static int add_jump_table(struct objtool_file *file, struct instruction *insn, } alt->insn = dest_insn; - list_add_tail(&alt->list, &insn->alts); + alt->next = insn->alts; + insn->alts = alt; prev_offset = reloc->offset; } @@ -2123,7 +2184,7 @@ static void mark_func_jump_tables(struct objtool_file *file, reloc = find_jump_table(file, func, insn); if (reloc) { reloc->jump_table_start = true; - insn->jump_table = reloc; + insn->_jump_table = reloc; } } } @@ -2135,10 +2196,10 @@ static int add_func_jump_tables(struct objtool_file *file, int ret; func_for_each_insn(file, func, insn) { - if (!insn->jump_table) + if (!insn_jump_table(insn)) continue; - ret = add_jump_table(file, insn, insn->jump_table); + ret = add_jump_table(file, insn, insn_jump_table(insn)); if (ret) return ret; } @@ -2271,6 +2332,7 @@ static int read_unwind_hints(struct objtool_file *file) cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset); cfi.type = hint->type; + cfi.signal = hint->signal; cfi.end = hint->end; insn->cfi = cfi_hash_find_or_add(&cfi); @@ -2610,8 +2672,8 @@ static int decode_sections(struct objtool_file *file) static bool is_fentry_call(struct instruction *insn) { if (insn->type == INSN_CALL && - insn->call_dest && - insn->call_dest->fentry) + insn_call_dest(insn) && + insn_call_dest(insn)->fentry) return true; return false; @@ -3206,8 +3268,12 @@ static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn alt_cfi[group_off] = insn->cfi; } else { if (cficmp(alt_cfi[group_off], insn->cfi)) { - WARN_FUNC("stack layout conflict in alternatives", - insn->sec, insn->offset); + struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group; + struct instruction *orig = orig_group->first_insn; + char *where = offstr(insn->sec, insn->offset); + WARN_FUNC("stack layout conflict in alternatives: %s", + orig->sec, orig->offset, where); + free(where); return -1; } } @@ -3221,7 +3287,7 @@ static int handle_insn_ops(struct instruction *insn, { struct stack_op *op; - list_for_each_entry(op, &insn->stack_ops, list) { + for (op = insn->stack_ops; op; op = op->next) { if (update_cfi_state(insn, next_insn, &state->cfi, op)) return 1; @@ -3318,8 +3384,8 @@ static inline const char *call_dest_name(struct instruction *insn) struct reloc *rel; int idx; - if (insn->call_dest) - return insn->call_dest->name; + if (insn_call_dest(insn)) + return insn_call_dest(insn)->name; rel = insn_reloc(NULL, insn); if (rel && !strcmp(rel->sym->name, "pv_ops")) { @@ -3401,13 +3467,13 @@ static int validate_call(struct objtool_file *file, struct insn_state *state) { if (state->noinstr && state->instr <= 0 && - !noinstr_call_dest(file, insn, insn->call_dest)) { + !noinstr_call_dest(file, insn, insn_call_dest(insn))) { WARN_FUNC("call to %s() leaves .noinstr.text section", insn->sec, insn->offset, 
call_dest_name(insn)); return 1; } - if (state->uaccess && !func_uaccess_safe(insn->call_dest)) { + if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) { WARN_FUNC("call to %s() with UACCESS enabled", insn->sec, insn->offset, call_dest_name(insn)); return 1; @@ -3485,11 +3551,28 @@ static struct instruction *next_insn_to_validate(struct objtool_file *file, * Simulate the fact that alternatives are patched in-place. When the * end of a replacement alt_group is reached, redirect objtool flow to * the end of the original alt_group. + * + * insn->alts->insn -> alt_group->first_insn + * ... + * alt_group->last_insn + * [alt_group->nop] -> next(orig_group->last_insn) */ - if (alt_group && insn == alt_group->last_insn && alt_group->orig_group) - return next_insn_same_sec(file, alt_group->orig_group->last_insn); + if (alt_group) { + if (alt_group->nop) { + /* ->nop implies ->orig_group */ + if (insn == alt_group->last_insn) + return alt_group->nop; + if (insn == alt_group->nop) + goto next_orig; + } + if (insn == alt_group->last_insn && alt_group->orig_group) + goto next_orig; + } return next_insn_same_sec(file, insn); + +next_orig: + return next_insn_same_sec(file, alt_group->orig_group->last_insn); } /* @@ -3590,10 +3673,10 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, if (propagate_alt_cfi(file, insn)) return 1; - if (!insn->ignore_alts && !list_empty(&insn->alts)) { + if (!insn->ignore_alts && insn->alts) { bool skip_orig = false; - list_for_each_entry(alt, &insn->alts, list) { + for (alt = insn->alts; alt; alt = alt->next) { if (alt->skip_orig) skip_orig = true; @@ -3740,11 +3823,25 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, return 0; } +static int validate_unwind_hint(struct objtool_file *file, + struct instruction *insn, + struct insn_state *state) +{ + if (insn->hint && !insn->visited && !insn->ignore) { + int ret = validate_branch(file, insn_func(insn), insn, *state); + if (ret && opts.backtrace) + BT_FUNC("<=== (hint)", insn); + return ret; + } + + return 0; +} + static int validate_unwind_hints(struct objtool_file *file, struct section *sec) { struct instruction *insn; struct insn_state state; - int ret, warnings = 0; + int warnings = 0; if (!file->hints) return 0; @@ -3752,22 +3849,11 @@ static int validate_unwind_hints(struct objtool_file *file, struct section *sec) init_insn_state(file, &state, sec); if (sec) { - insn = find_insn(file, sec, 0); - if (!insn) - return 0; + sec_for_each_insn(file, sec, insn) + warnings += validate_unwind_hint(file, insn, &state); } else { - insn = list_first_entry(&file->insn_list, typeof(*insn), list); - } - - while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) { - if (insn->hint && !insn->visited && !insn->ignore) { - ret = validate_branch(file, insn_func(insn), insn, state); - if (ret && opts.backtrace) - BT_FUNC("<=== (hint)", insn); - warnings += ret; - } - - insn = list_next_entry(insn, list); + for_each_insn(file, insn) + warnings += validate_unwind_hint(file, insn, &state); } return warnings; @@ -3792,11 +3878,11 @@ static int validate_entry(struct objtool_file *file, struct instruction *insn) insn->visited |= VISITED_ENTRY; - if (!insn->ignore_alts && !list_empty(&insn->alts)) { + if (!insn->ignore_alts && insn->alts) { struct alternative *alt; bool skip_orig = false; - list_for_each_entry(alt, &insn->alts, list) { + for (alt = insn->alts; alt; alt = alt->next) { if (alt->skip_orig) skip_orig = true; @@ -3845,11 +3931,11 @@ static int 
validate_entry(struct objtool_file *file, struct instruction *insn) /* fallthrough */ case INSN_CALL: - dest = find_insn(file, insn->call_dest->sec, - insn->call_dest->offset); + dest = find_insn(file, insn_call_dest(insn)->sec, + insn_call_dest(insn)->offset); if (!dest) { WARN("Unresolved function after linking!?: %s", - insn->call_dest->name); + insn_call_dest(insn)->name); return -1; } @@ -3950,13 +4036,13 @@ static int validate_retpoline(struct objtool_file *file) static bool is_kasan_insn(struct instruction *insn) { return (insn->type == INSN_CALL && - !strcmp(insn->call_dest->name, "__asan_handle_no_return")); + !strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return")); } static bool is_ubsan_insn(struct instruction *insn) { return (insn->type == INSN_CALL && - !strcmp(insn->call_dest->name, + !strcmp(insn_call_dest(insn)->name, "__ubsan_handle_builtin_unreachable")); } @@ -4033,8 +4119,9 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio * * It may also insert a UD2 after calling a __noreturn function. */ - prev_insn = list_prev_entry(insn, list); - if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) && + prev_insn = prev_insn_same_sec(file, insn); + if ((prev_insn->dead_end || + dead_end_function(file, insn_call_dest(prev_insn))) && (insn->type == INSN_BUG || (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest && insn->jump_dest->type == INSN_BUG))) @@ -4064,7 +4151,7 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio if (insn->offset + insn->len >= insn_func(insn)->offset + insn_func(insn)->len) break; - insn = list_next_entry(insn, list); + insn = next_insn_same_sec(file, insn); } return false; @@ -4077,10 +4164,10 @@ static int add_prefix_symbol(struct objtool_file *file, struct symbol *func, return 0; for (;;) { - struct instruction *prev = list_prev_entry(insn, list); + struct instruction *prev = prev_insn_same_sec(file, insn); u64 offset; - if (&prev->list == &file->insn_list) + if (!prev) break; if (prev->type != INSN_NOP) @@ -4479,7 +4566,7 @@ int check(struct objtool_file *file) warnings += ret; - if (list_empty(&file->insn_list)) + if (!nr_insns) goto out; if (opts.retpoline) { @@ -4588,7 +4675,7 @@ int check(struct objtool_file *file) warnings += ret; } - if (opts.orc && !list_empty(&file->insn_list)) { + if (opts.orc && nr_insns) { ret = orc_create(file); if (ret < 0) goto out; diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 64443a7f4bbf..6806ce01d933 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -284,13 +284,13 @@ static int read_sections(struct elf *elf) !elf_alloc_hash(section_name, sections_nr)) return -1; + elf->section_data = calloc(sections_nr, sizeof(*sec)); + if (!elf->section_data) { + perror("calloc"); + return -1; + } for (i = 0; i < sections_nr; i++) { - sec = malloc(sizeof(*sec)); - if (!sec) { - perror("malloc"); - return -1; - } - memset(sec, 0, sizeof(*sec)); + sec = &elf->section_data[i]; INIT_LIST_HEAD(&sec->symbol_list); INIT_LIST_HEAD(&sec->reloc_list); @@ -422,13 +422,13 @@ static int read_symbols(struct elf *elf) !elf_alloc_hash(symbol_name, symbols_nr)) return -1; + elf->symbol_data = calloc(symbols_nr, sizeof(*sym)); + if (!elf->symbol_data) { + perror("calloc"); + return -1; + } for (i = 0; i < symbols_nr; i++) { - sym = malloc(sizeof(*sym)); - if (!sym) { - perror("malloc"); - return -1; - } - memset(sym, 0, sizeof(*sym)); + sym = &elf->symbol_data[i]; sym->idx = i; @@ -918,13 +918,13 @@ static int 
read_relocs(struct elf *elf) sec->base->reloc = sec; nr_reloc = 0; + sec->reloc_data = calloc(sec->sh.sh_size / sec->sh.sh_entsize, sizeof(*reloc)); + if (!sec->reloc_data) { + perror("calloc"); + return -1; + } for (i = 0; i < sec->sh.sh_size / sec->sh.sh_entsize; i++) { - reloc = malloc(sizeof(*reloc)); - if (!reloc) { - perror("malloc"); - return -1; - } - memset(reloc, 0, sizeof(*reloc)); + reloc = &sec->reloc_data[i]; switch (sec->sh.sh_type) { case SHT_REL: if (read_rel_reloc(sec, i, reloc, &symndx)) @@ -1453,16 +1453,16 @@ void elf_close(struct elf *elf) list_for_each_entry_safe(sym, tmpsym, &sec->symbol_list, list) { list_del(&sym->list); hash_del(&sym->hash); - free(sym); } list_for_each_entry_safe(reloc, tmpreloc, &sec->reloc_list, list) { list_del(&reloc->list); hash_del(&reloc->hash); - free(reloc); } list_del(&sec->list); - free(sec); + free(sec->reloc_data); } + free(elf->symbol_data); + free(elf->section_data); free(elf); } diff --git a/tools/objtool/include/objtool/arch.h b/tools/objtool/include/objtool/arch.h index 4ecb480131c7..2b6d2ce4f9a5 100644 --- a/tools/objtool/include/objtool/arch.h +++ b/tools/objtool/include/objtool/arch.h @@ -62,9 +62,9 @@ struct op_src { }; struct stack_op { + struct stack_op *next; struct op_dest dest; struct op_src src; - struct list_head list; }; struct instruction; @@ -75,9 +75,7 @@ void arch_initial_func_cfi_state(struct cfi_init_state *state); int arch_decode_instruction(struct objtool_file *file, const struct section *sec, unsigned long offset, unsigned int maxlen, - unsigned int *len, enum insn_type *type, - unsigned long *immediate, - struct list_head *ops_list); + struct instruction *insn); bool arch_callee_saved_reg(unsigned char reg); diff --git a/tools/objtool/include/objtool/builtin.h b/tools/objtool/include/objtool/builtin.h index fa45044e3863..2a108e648b7a 100644 --- a/tools/objtool/include/objtool/builtin.h +++ b/tools/objtool/include/objtool/builtin.h @@ -7,8 +7,6 @@ #include <subcmd/parse-options.h> -extern const struct option check_options[]; - struct opts { /* actions: */ bool dump_orc; diff --git a/tools/objtool/include/objtool/cfi.h b/tools/objtool/include/objtool/cfi.h index f11d1ac1dadf..b1258e79a1b7 100644 --- a/tools/objtool/include/objtool/cfi.h +++ b/tools/objtool/include/objtool/cfi.h @@ -34,6 +34,7 @@ struct cfi_state { unsigned char type; bool bp_scratch; bool drap; + bool signal; bool end; }; diff --git a/tools/objtool/include/objtool/check.h b/tools/objtool/include/objtool/check.h index acd7fae59348..3e7c7004f7df 100644 --- a/tools/objtool/include/objtool/check.h +++ b/tools/objtool/include/objtool/check.h @@ -27,7 +27,7 @@ struct alt_group { struct alt_group *orig_group; /* First and last instructions in the group */ - struct instruction *first_insn, *last_insn; + struct instruction *first_insn, *last_insn, *nop; /* * Byte-offset-addressed len-sized array of pointers to CFI structs. 
@@ -36,39 +36,46 @@ struct alt_group { struct cfi_state **cfi; }; +#define INSN_CHUNK_BITS 8 +#define INSN_CHUNK_SIZE (1 << INSN_CHUNK_BITS) +#define INSN_CHUNK_MAX (INSN_CHUNK_SIZE - 1) + struct instruction { - struct list_head list; struct hlist_node hash; struct list_head call_node; struct section *sec; unsigned long offset; - unsigned int len; - enum insn_type type; unsigned long immediate; - u16 dead_end : 1, - ignore : 1, - ignore_alts : 1, - hint : 1, - save : 1, - restore : 1, - retpoline_safe : 1, - noendbr : 1, - entry : 1; - /* 7 bit hole */ - + u8 len; + u8 prev_len; + u8 type; s8 instr; - u8 visited; + + u32 idx : INSN_CHUNK_BITS, + dead_end : 1, + ignore : 1, + ignore_alts : 1, + hint : 1, + save : 1, + restore : 1, + retpoline_safe : 1, + noendbr : 1, + entry : 1, + visited : 4, + no_reloc : 1; + /* 10 bit hole */ struct alt_group *alt_group; - struct symbol *call_dest; struct instruction *jump_dest; struct instruction *first_jump_src; - struct reloc *jump_table; - struct reloc *reloc; - struct list_head alts; + union { + struct symbol *_call_dest; + struct reloc *_jump_table; + }; + struct alternative *alts; struct symbol *sym; - struct list_head stack_ops; + struct stack_op *stack_ops; struct cfi_state *cfi; }; @@ -107,13 +114,11 @@ static inline bool is_jump(struct instruction *insn) struct instruction *find_insn(struct objtool_file *file, struct section *sec, unsigned long offset); -#define for_each_insn(file, insn) \ - list_for_each_entry(insn, &file->insn_list, list) +struct instruction *next_insn_same_sec(struct objtool_file *file, struct instruction *insn); -#define sec_for_each_insn(file, sec, insn) \ - for (insn = find_insn(file, sec, 0); \ - insn && &insn->list != &file->insn_list && \ - insn->sec == sec; \ - insn = list_next_entry(insn, list)) +#define sec_for_each_insn(file, _sec, insn) \ + for (insn = find_insn(file, _sec, 0); \ + insn && insn->sec == _sec; \ + insn = next_insn_same_sec(file, insn)) #endif /* _CHECK_H */ diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h index bb60fd42b46f..ad0024da262b 100644 --- a/tools/objtool/include/objtool/elf.h +++ b/tools/objtool/include/objtool/elf.h @@ -39,6 +39,7 @@ struct section { char *name; int idx; bool changed, text, rodata, noinstr, init, truncate; + struct reloc *reloc_data; }; struct symbol { @@ -49,12 +50,11 @@ struct symbol { GElf_Sym sym; struct section *sec; char *name; - unsigned int idx; - unsigned char bind, type; + unsigned int idx, len; unsigned long offset; - unsigned int len; unsigned long __subtree_last; struct symbol *pfunc, *cfunc, *alias; + unsigned char bind, type; u8 uaccess_safe : 1; u8 static_call_tramp : 1; u8 retpoline_thunk : 1; @@ -104,6 +104,9 @@ struct elf { struct hlist_head *section_hash; struct hlist_head *section_name_hash; struct hlist_head *reloc_hash; + + struct section *section_data; + struct symbol *symbol_data; }; #define OFFSET_STRIDE_BITS 4 diff --git a/tools/objtool/include/objtool/objtool.h b/tools/objtool/include/objtool/objtool.h index 6b40977bcdb1..94a33ee7b363 100644 --- a/tools/objtool/include/objtool/objtool.h +++ b/tools/objtool/include/objtool/objtool.h @@ -21,7 +21,6 @@ struct pv_state { struct objtool_file { struct elf *elf; - struct list_head insn_list; DECLARE_HASHTABLE(insn_hash, 20); struct list_head retpoline_call_list; struct list_head return_thunk_list; diff --git a/tools/objtool/include/objtool/special.h b/tools/objtool/include/objtool/special.h index dc4721e19002..86d4af9c5aa9 100644 --- 
a/tools/objtool/include/objtool/special.h +++ b/tools/objtool/include/objtool/special.h @@ -19,6 +19,7 @@ struct special_alt { bool skip_orig; bool skip_alt; bool jump_or_nop; + u8 key_addend; struct section *orig_sec; unsigned long orig_off; @@ -27,7 +28,6 @@ struct special_alt { unsigned long new_off; unsigned int orig_len, new_len; /* group only */ - u8 key_addend; }; int special_get_alts(struct elf *elf, struct list_head *alts); diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c index 6affd8067f83..c54f7235c5d9 100644 --- a/tools/objtool/objtool.c +++ b/tools/objtool/objtool.c @@ -99,7 +99,6 @@ struct objtool_file *objtool_open_read(const char *_objname) return NULL; } - INIT_LIST_HEAD(&file.insn_list); hash_init(file.insn_hash); INIT_LIST_HEAD(&file.retpoline_call_list); INIT_LIST_HEAD(&file.return_thunk_list); diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c index 4f1211fec82c..2d8ebdcd1db3 100644 --- a/tools/objtool/orc_dump.c +++ b/tools/objtool/orc_dump.c @@ -211,8 +211,8 @@ int orc_dump(const char *_objname) print_reg(orc[i].bp_reg, bswap_if_needed(&dummy_elf, orc[i].bp_offset)); - printf(" type:%s end:%d\n", - orc_type_name(orc[i].type), orc[i].end); + printf(" type:%s signal:%d end:%d\n", + orc_type_name(orc[i].type), orc[i].signal, orc[i].end); } elf_end(elf); diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c index 1f22b7ebae58..57a4527d5988 100644 --- a/tools/objtool/orc_gen.c +++ b/tools/objtool/orc_gen.c @@ -27,6 +27,7 @@ static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, } orc->end = cfi->end; + orc->signal = cfi->signal; if (cfi->cfa.base == CFI_UNDEFINED) { orc->sp_reg = ORC_REG_UNDEFINED; diff --git a/tools/objtool/special.c b/tools/objtool/special.c index 9c8d827f69af..baa85c31526b 100644 --- a/tools/objtool/special.c +++ b/tools/objtool/special.c @@ -26,7 +26,7 @@ struct special_entry { unsigned char key; /* jump_label key */ }; -struct special_entry entries[] = { +static const struct special_entry entries[] = { { .sec = ".altinstructions", .group = true, @@ -65,7 +65,7 @@ static void reloc_to_sec_off(struct reloc *reloc, struct section **sec, *off = reloc->sym->offset + reloc->addend; } -static int get_alt_entry(struct elf *elf, struct special_entry *entry, +static int get_alt_entry(struct elf *elf, const struct special_entry *entry, struct section *sec, int idx, struct special_alt *alt) { @@ -139,7 +139,7 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry, */ int special_get_alts(struct elf *elf, struct list_head *alts) { - struct special_entry *entry; + const struct special_entry *entry; struct section *sec; unsigned int nr_entries; struct special_alt *alt; diff --git a/tools/scripts/Makefile.arch b/tools/scripts/Makefile.arch index 0c6c7f456887..1c72d07cb9fe 100644 --- a/tools/scripts/Makefile.arch +++ b/tools/scripts/Makefile.arch @@ -5,7 +5,7 @@ HOSTARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \ -e s/s390x/s390/ -e s/parisc64/parisc/ \ -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \ - -e s/riscv.*/riscv/) + -e s/riscv.*/riscv/ -e s/loongarch.*/loongarch/) ifndef ARCH ARCH := $(HOSTARCH) @@ -34,6 +34,15 @@ ifeq ($(ARCH),sh64) SRCARCH := sh endif +# Additional ARCH settings for loongarch +ifeq ($(ARCH),loongarch32) + SRCARCH := loongarch +endif + +ifeq ($(ARCH),loongarch64) + SRCARCH := loongarch +endif + LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1) ifeq ($(LP64), 1) IS_64_BIT := 1 diff --git 
a/tools/testing/selftests/bpf/progs/test_deny_namespace.c b/tools/testing/selftests/bpf/progs/test_deny_namespace.c index 09ad5a4ebd1f..591104e79812 100644 --- a/tools/testing/selftests/bpf/progs/test_deny_namespace.c +++ b/tools/testing/selftests/bpf/progs/test_deny_namespace.c @@ -6,7 +6,7 @@ #include <linux/capability.h> struct kernel_cap_struct { - __u32 cap[_LINUX_CAPABILITY_U32S_3]; + __u64 val; } __attribute__((preserve_access_index)); struct cred { @@ -19,14 +19,13 @@ SEC("lsm.s/userns_create") int BPF_PROG(test_userns_create, const struct cred *cred, int ret) { struct kernel_cap_struct caps = cred->cap_effective; - int cap_index = CAP_TO_INDEX(CAP_SYS_ADMIN); - __u32 cap_mask = CAP_TO_MASK(CAP_SYS_ADMIN); + __u64 cap_mask = BIT_LL(CAP_SYS_ADMIN); if (ret) return 0; ret = -EPERM; - if (caps.cap[cap_index] & cap_mask) + if (caps.val & cap_mask) return 0; return -EPERM; diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc index 459741565222..a4f8e7c53c1f 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc @@ -28,6 +28,9 @@ s390*) mips*) ARG1=%r4 ;; +loongarch*) + ARG1=%r4 +;; *) echo "Please implement other architecture here" exit_untested diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc index d4662c8cf407..1df61e13a812 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc @@ -40,6 +40,10 @@ mips*) GOODREG=%r4 BADREG=%r12 ;; +loongarch*) + GOODREG=%r4 + BADREG=%r12 +;; *) echo "Please implement other architecture here" exit_untested diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 1750f91dd936..84a627c43795 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -67,6 +67,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/fix_hypercall_test TEST_GEN_PROGS_x86_64 += x86_64/hyperv_clock TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid TEST_GEN_PROGS_x86_64 += x86_64/hyperv_evmcs +TEST_GEN_PROGS_x86_64 += x86_64/hyperv_extended_hypercalls TEST_GEN_PROGS_x86_64 += x86_64/hyperv_features TEST_GEN_PROGS_x86_64 += x86_64/hyperv_ipi TEST_GEN_PROGS_x86_64 += x86_64/hyperv_svm_test @@ -200,6 +201,9 @@ CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \ -I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \ -I$(<D) -Iinclude/$(ARCH_DIR) -I ../rseq -I.. 
$(EXTRA_CFLAGS) \ $(KHDR_INCLUDES) +ifeq ($(ARCH),s390) + CFLAGS += -march=z10 +endif no-pie-option := $(call try-run, echo 'int main(void) { return 0; }' | \ $(CC) -Werror $(CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie) diff --git a/tools/testing/selftests/kvm/aarch64/page_fault_test.c b/tools/testing/selftests/kvm/aarch64/page_fault_test.c index 54680dc5887f..df10f1ffa20d 100644 --- a/tools/testing/selftests/kvm/aarch64/page_fault_test.c +++ b/tools/testing/selftests/kvm/aarch64/page_fault_test.c @@ -1112,8 +1112,6 @@ int main(int argc, char *argv[]) enum vm_mem_backing_src_type src_type; int opt; - setbuf(stdout, NULL); - src_type = DEFAULT_VM_MEM_SRC; while ((opt = getopt(argc, argv, "hm:s:")) != -1) { diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config index 63ed533f73d6..d011b38e259e 100644 --- a/tools/testing/selftests/kvm/config +++ b/tools/testing/selftests/kvm/config @@ -1,3 +1,4 @@ CONFIG_KVM=y CONFIG_KVM_INTEL=y CONFIG_KVM_AMD=y +CONFIG_USERFAULTFD=y diff --git a/tools/testing/selftests/kvm/include/x86_64/hyperv.h b/tools/testing/selftests/kvm/include/x86_64/hyperv.h index 9218bb5f44bf..fa65b908b13e 100644 --- a/tools/testing/selftests/kvm/include/x86_64/hyperv.h +++ b/tools/testing/selftests/kvm/include/x86_64/hyperv.h @@ -85,61 +85,110 @@ #define HV_X64_MSR_SYNDBG_OPTIONS 0x400000FF /* HYPERV_CPUID_FEATURES.EAX */ -#define HV_MSR_VP_RUNTIME_AVAILABLE BIT(0) -#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1) -#define HV_MSR_SYNIC_AVAILABLE BIT(2) -#define HV_MSR_SYNTIMER_AVAILABLE BIT(3) -#define HV_MSR_APIC_ACCESS_AVAILABLE BIT(4) -#define HV_MSR_HYPERCALL_AVAILABLE BIT(5) -#define HV_MSR_VP_INDEX_AVAILABLE BIT(6) -#define HV_MSR_RESET_AVAILABLE BIT(7) -#define HV_MSR_STAT_PAGES_AVAILABLE BIT(8) -#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9) -#define HV_MSR_GUEST_IDLE_AVAILABLE BIT(10) -#define HV_ACCESS_FREQUENCY_MSRS BIT(11) -#define HV_ACCESS_REENLIGHTENMENT BIT(13) -#define HV_ACCESS_TSC_INVARIANT BIT(15) +#define HV_MSR_VP_RUNTIME_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 0) +#define HV_MSR_TIME_REF_COUNT_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 1) +#define HV_MSR_SYNIC_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 2) +#define HV_MSR_SYNTIMER_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 3) +#define HV_MSR_APIC_ACCESS_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 4) +#define HV_MSR_HYPERCALL_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 5) +#define HV_MSR_VP_INDEX_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 6) +#define HV_MSR_RESET_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 7) +#define HV_MSR_STAT_PAGES_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 8) +#define HV_MSR_REFERENCE_TSC_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 9) +#define HV_MSR_GUEST_IDLE_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 10) +#define HV_ACCESS_FREQUENCY_MSRS \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 11) +#define HV_ACCESS_REENLIGHTENMENT \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 13) +#define HV_ACCESS_TSC_INVARIANT \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EAX, 15) /* HYPERV_CPUID_FEATURES.EBX */ -#define HV_CREATE_PARTITIONS BIT(0) -#define HV_ACCESS_PARTITION_ID BIT(1) -#define HV_ACCESS_MEMORY_POOL BIT(2) -#define HV_ADJUST_MESSAGE_BUFFERS BIT(3) -#define HV_POST_MESSAGES BIT(4) -#define HV_SIGNAL_EVENTS 
BIT(5) -#define HV_CREATE_PORT BIT(6) -#define HV_CONNECT_PORT BIT(7) -#define HV_ACCESS_STATS BIT(8) -#define HV_DEBUGGING BIT(11) -#define HV_CPU_MANAGEMENT BIT(12) -#define HV_ISOLATION BIT(22) +#define HV_CREATE_PARTITIONS \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 0) +#define HV_ACCESS_PARTITION_ID \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 1) +#define HV_ACCESS_MEMORY_POOL \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 2) +#define HV_ADJUST_MESSAGE_BUFFERS \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 3) +#define HV_POST_MESSAGES \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 4) +#define HV_SIGNAL_EVENTS \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 5) +#define HV_CREATE_PORT \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 6) +#define HV_CONNECT_PORT \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 7) +#define HV_ACCESS_STATS \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 8) +#define HV_DEBUGGING \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 11) +#define HV_CPU_MANAGEMENT \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 12) +#define HV_ENABLE_EXTENDED_HYPERCALLS \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 20) +#define HV_ISOLATION \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EBX, 22) /* HYPERV_CPUID_FEATURES.EDX */ -#define HV_X64_MWAIT_AVAILABLE BIT(0) -#define HV_X64_GUEST_DEBUGGING_AVAILABLE BIT(1) -#define HV_X64_PERF_MONITOR_AVAILABLE BIT(2) -#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE BIT(3) -#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE BIT(4) -#define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5) -#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE BIT(8) -#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10) -#define HV_FEATURE_DEBUG_MSRS_AVAILABLE BIT(11) -#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(19) +#define HV_X64_MWAIT_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 0) +#define HV_X64_GUEST_DEBUGGING_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 1) +#define HV_X64_PERF_MONITOR_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 2) +#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 3) +#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 4) +#define HV_X64_GUEST_IDLE_STATE_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 5) +#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 8) +#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 10) +#define HV_FEATURE_DEBUG_MSRS_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 11) +#define HV_STIMER_DIRECT_MODE_AVAILABLE \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_FEATURES, 0, EDX, 19) /* HYPERV_CPUID_ENLIGHTMENT_INFO.EAX */ -#define HV_X64_AS_SWITCH_RECOMMENDED BIT(0) -#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED BIT(1) -#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED BIT(2) -#define HV_X64_APIC_ACCESS_RECOMMENDED BIT(3) -#define HV_X64_SYSTEM_RESET_RECOMMENDED BIT(4) -#define HV_X64_RELAXED_TIMING_RECOMMENDED BIT(5) -#define HV_DEPRECATING_AEOI_RECOMMENDED BIT(9) -#define HV_X64_CLUSTER_IPI_RECOMMENDED BIT(10) -#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED BIT(11) -#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED BIT(14) +#define HV_X64_AS_SWITCH_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 0) +#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED \ + 
KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 1) +#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 2) +#define HV_X64_APIC_ACCESS_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 3) +#define HV_X64_SYSTEM_RESET_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 4) +#define HV_X64_RELAXED_TIMING_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 5) +#define HV_DEPRECATING_AEOI_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 9) +#define HV_X64_CLUSTER_IPI_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 10) +#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 11) +#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 14) /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */ -#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING BIT(1) +#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES, 0, EAX, 1) /* Hypercalls */ #define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002 @@ -166,6 +215,9 @@ #define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af #define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0 +/* Extended hypercalls */ +#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001 + #define HV_FLUSH_ALL_PROCESSORS BIT(0) #define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1) #define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2) @@ -288,4 +340,7 @@ struct hyperv_test_pages { struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm, vm_vaddr_t *p_hv_pages_gva); +/* HV_X64_MSR_TSC_INVARIANT_CONTROL bits */ +#define HV_INVARIANT_TSC_EXPOSED BIT_ULL(0) + #endif /* !SELFTEST_KVM_HYPERV_H */ diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index b1a31de7108a..53ffa43c90db 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -19,6 +19,9 @@ #include "../kvm_util.h" +extern bool host_cpu_is_intel; +extern bool host_cpu_is_amd; + #define NMI_VECTOR 0x02 #define X86_EFLAGS_FIXED (1u << 1) @@ -137,6 +140,7 @@ struct kvm_x86_cpu_feature { #define X86_FEATURE_GBPAGES KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 26) #define X86_FEATURE_RDTSCP KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 27) #define X86_FEATURE_LM KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 29) +#define X86_FEATURE_INVTSC KVM_X86_CPU_FEATURE(0x80000007, 0, EDX, 8) #define X86_FEATURE_RDPRU KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 4) #define X86_FEATURE_AMD_IBPB KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 12) #define X86_FEATURE_NPT KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 0) @@ -554,6 +558,28 @@ static inline uint32_t this_cpu_model(void) return x86_model(this_cpu_fms()); } +static inline bool this_cpu_vendor_string_is(const char *vendor) +{ + const uint32_t *chunk = (const uint32_t *)vendor; + uint32_t eax, ebx, ecx, edx; + + cpuid(0, &eax, &ebx, &ecx, &edx); + return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]); +} + +static inline bool this_cpu_is_intel(void) +{ + return this_cpu_vendor_string_is("GenuineIntel"); +} + +/* + * Exclude early K5 samples with a vendor string of "AMDisbetter!" 
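Each converted define above packs a (CPUID leaf, subleaf, register, bit) tuple, which is what lets generic helpers such as vcpu_set_cpuid_feature() and kvm_cpuid_has() treat Hyper-V bits like any other x86 feature. A hedged sketch of what probing such a tuple boils down to; the struct layout is illustrative rather than the harness's exact definition, and __cpuid_count() is the GCC/clang x86 intrinsic from <cpuid.h>:

#include <stdbool.h>
#include <stdint.h>
#include <cpuid.h>

enum cpuid_reg { REG_EAX, REG_EBX, REG_ECX, REG_EDX };

struct cpu_feature {
	uint32_t function;	/* CPUID leaf, e.g. HYPERV_CPUID_FEATURES */
	uint32_t index;		/* subleaf */
	enum cpuid_reg reg;
	uint8_t bit;
};

static bool cpu_has(struct cpu_feature f)
{
	uint32_t r[4];

	__cpuid_count(f.function, f.index,
		      r[REG_EAX], r[REG_EBX], r[REG_ECX], r[REG_EDX]);
	return (r[f.reg] >> f.bit) & 1;
}
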
+ */ +static inline bool this_cpu_is_amd(void) +{ + return this_cpu_vendor_string_is("AuthenticAMD"); +} + static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index, uint8_t reg, uint8_t lo, uint8_t hi) { @@ -690,9 +716,6 @@ static inline void cpu_relax(void) "hlt\n" \ ) -bool is_intel_cpu(void); -bool is_amd_cpu(void); - struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu); void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state); void kvm_x86_state_cleanup(struct kvm_x86_state *state); @@ -716,7 +739,7 @@ static inline void vcpu_msrs_set(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs) int r = __vcpu_ioctl(vcpu, KVM_SET_MSRS, msrs); TEST_ASSERT(r == msrs->nmsrs, - "KVM_GET_MSRS failed, r: %i (failed on MSR %x)", + "KVM_SET_MSRS failed, r: %i (failed on MSR %x)", r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index); } static inline void vcpu_debugregs_get(struct kvm_vcpu *vcpu, diff --git a/tools/testing/selftests/kvm/kvm_binary_stats_test.c b/tools/testing/selftests/kvm/kvm_binary_stats_test.c index 0b45ac593387..a7001e29dc06 100644 --- a/tools/testing/selftests/kvm/kvm_binary_stats_test.c +++ b/tools/testing/selftests/kvm/kvm_binary_stats_test.c @@ -19,6 +19,7 @@ #include "kvm_util.h" #include "asm/kvm.h" #include "linux/kvm.h" +#include "kselftest.h" static void stats_test(int stats_fd) { @@ -51,7 +52,7 @@ static void stats_test(int stats_fd) /* Sanity check for other fields in header */ if (header.num_desc == 0) { - printf("No KVM stats defined!"); + ksft_print_msg("No KVM stats defined!\n"); return; } /* @@ -133,7 +134,7 @@ static void stats_test(int stats_fd) "Bucket size of stats (%s) is not zero", pdesc->name); } - size_data += pdesc->size * sizeof(*stats_data); + size_data = max(size_data, pdesc->offset + pdesc->size * sizeof(*stats_data)); } /* @@ -148,14 +149,6 @@ static void stats_test(int stats_fd) TEST_ASSERT(size_data >= header.num_desc * sizeof(*stats_data), "Data size is not correct"); - /* Check stats offset */ - for (i = 0; i < header.num_desc; ++i) { - pdesc = get_stats_descriptor(stats_desc, i, &header); - TEST_ASSERT(pdesc->offset < size_data, - "Invalid offset (%u) for stats: %s", - pdesc->offset, pdesc->name); - } - /* Allocate memory for stats data */ stats_data = malloc(size_data); TEST_ASSERT(stats_data, "Allocate memory for stats data"); @@ -224,9 +217,13 @@ int main(int argc, char *argv[]) max_vcpu = DEFAULT_NUM_VCPU; } + ksft_print_header(); + /* Check the extension for binary stats */ TEST_REQUIRE(kvm_has_cap(KVM_CAP_BINARY_STATS_FD)); + ksft_set_plan(max_vm); + /* Create VMs and VCPUs */ vms = malloc(sizeof(vms[0]) * max_vm); TEST_ASSERT(vms, "Allocate memory for storing VM pointers"); @@ -245,10 +242,12 @@ int main(int argc, char *argv[]) vm_stats_test(vms[i]); for (j = 0; j < max_vcpu; ++j) vcpu_stats_test(vcpus[i * max_vcpu + j]); + ksft_test_result_pass("vm%i\n", i); } for (i = 0; i < max_vm; ++i) kvm_vm_free(vms[i]); free(vms); - return 0; + + ksft_finished(); /* Print results and exit() accordingly */ } diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c index 820ac2d08c98..266f3876e10a 100644 --- a/tools/testing/selftests/kvm/lib/elf.c +++ b/tools/testing/selftests/kvm/lib/elf.c @@ -90,6 +90,7 @@ static void elfhdr_get(const char *filename, Elf64_Ehdr *hdrp) " hdrp->e_shentsize: %x\n" " expected: %zx", hdrp->e_shentsize, sizeof(Elf64_Shdr)); + close(fd); } /* VM ELF Load @@ -190,4 +191,5 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename) phdr.p_filesz); } } 
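The kvm_binary_stats_test fix above stops assuming descriptors are densely packed: the data buffer must cover the furthest descriptor offset plus that descriptor's length, not the sum of all sizes. A small illustrative sizing pass with made-up values, mirroring the fields used above (offset in bytes, size in 8-byte stat elements):

#include <stdint.h>
#include <stdio.h>

struct desc {
	uint32_t offset;	/* byte offset into the data block */
	uint32_t size;		/* number of u64 values */
};

static size_t data_bytes_needed(const struct desc *d, size_t n)
{
	size_t need = 0;

	for (size_t i = 0; i < n; i++) {
		size_t end = d[i].offset + d[i].size * sizeof(uint64_t);

		if (end > need)		/* max(), as in the fixed test */
			need = end;
	}
	return need;
}

int main(void)
{
	const struct desc descs[] = { { 0, 1 }, { 24, 2 }, { 8, 1 } };

	/* a gap at offset 16 is fine; the buffer must still reach 24 + 16 = 40 */
	printf("%zu bytes\n", data_bytes_needed(descs, 3));
	return 0;
}
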
+ close(fd); } diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c index 99a575bbbc52..1df3ce4b16fd 100644 --- a/tools/testing/selftests/kvm/lib/guest_modes.c +++ b/tools/testing/selftests/kvm/lib/guest_modes.c @@ -127,7 +127,7 @@ void guest_modes_cmdline(const char *arg) mode_selected = true; } - mode = strtoul(optarg, NULL, 10); + mode = atoi_non_negative("Guest mode ID", arg); TEST_ASSERT(mode < NUM_VM_MODES, "Guest mode ID %d too big", mode); guest_modes[mode].enabled = true; } diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 56d5ea949cbb..3ea24a5f4c43 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -1844,6 +1844,7 @@ static struct exit_reason { {KVM_EXIT_X86_RDMSR, "RDMSR"}, {KVM_EXIT_X86_WRMSR, "WRMSR"}, {KVM_EXIT_XEN, "XEN"}, + {KVM_EXIT_HYPERV, "HYPERV"}, #ifdef KVM_EXIT_MEMORY_NOT_PRESENT {KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"}, #endif @@ -1941,9 +1942,6 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); } -/* Arbitrary minimum physical address used for virtual translation tables. */ -#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000 - vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm) { return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index acfa1d01e7df..ae1e573d94ce 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -19,6 +19,8 @@ #define MAX_NR_CPUID_ENTRIES 100 vm_vaddr_t exception_handlers; +bool host_cpu_is_amd; +bool host_cpu_is_intel; static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent) { @@ -113,7 +115,7 @@ static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent) bool kvm_is_tdp_enabled(void) { - if (is_intel_cpu()) + if (host_cpu_is_intel) return get_kvm_intel_param_bool("ept"); else return get_kvm_amd_param_bool("npt"); @@ -555,6 +557,8 @@ static void vcpu_setup(struct kvm_vm *vm, struct kvm_vcpu *vcpu) void kvm_arch_vm_post_create(struct kvm_vm *vm) { vm_create_irqchip(vm); + sync_global_to_guest(vm, host_cpu_is_intel); + sync_global_to_guest(vm, host_cpu_is_amd); } struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, @@ -1006,28 +1010,6 @@ void kvm_x86_state_cleanup(struct kvm_x86_state *state) free(state); } -static bool cpu_vendor_string_is(const char *vendor) -{ - const uint32_t *chunk = (const uint32_t *)vendor; - uint32_t eax, ebx, ecx, edx; - - cpuid(0, &eax, &ebx, &ecx, &edx); - return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]); -} - -bool is_intel_cpu(void) -{ - return cpu_vendor_string_is("GenuineIntel"); -} - -/* - * Exclude early K5 samples with a vendor string of "AMDisbetter!" 
- */ -bool is_amd_cpu(void) -{ - return cpu_vendor_string_is("AuthenticAMD"); -} - void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits) { if (!kvm_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) { @@ -1162,9 +1144,15 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2, { uint64_t r; - asm volatile("vmcall" + asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t" + "jnz 1f\n\t" + "vmcall\n\t" + "jmp 2f\n\t" + "1: vmmcall\n\t" + "2:" : "=a"(r) - : "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3)); + : "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3), + [use_vmmcall] "r" (host_cpu_is_amd)); return r; } @@ -1236,7 +1224,7 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm) max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1; /* Avoid reserved HyperTransport region on AMD processors. */ - if (!is_amd_cpu()) + if (!host_cpu_is_amd) return max_gfn; /* On parts with <40 physical address bits, the area is fully hidden */ @@ -1276,3 +1264,9 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm) return get_kvm_intel_param_bool("unrestricted_guest"); } + +void kvm_selftest_arch_init(void) +{ + host_cpu_is_intel = this_cpu_is_intel(); + host_cpu_is_amd = this_cpu_is_amd(); +} diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c index e6587e193490..4210cd21d159 100644 --- a/tools/testing/selftests/kvm/memslot_perf_test.c +++ b/tools/testing/selftests/kvm/memslot_perf_test.c @@ -308,8 +308,6 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots, data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots); TEST_ASSERT(data->hva_slots, "malloc() fail"); - data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code); - pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n", data->nslots, data->pages_per_slot, rempages); @@ -349,6 +347,7 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots, virt_map(data->vm, MEM_GPA, MEM_GPA, data->npages); sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL); + sync->guest_page_size = data->vm->page_size; atomic_init(&sync->start_flag, false); atomic_init(&sync->exit_flag, false); atomic_init(&sync->sync_flag, false); @@ -810,8 +809,6 @@ static bool test_execute(int nslots, uint64_t *maxslots, } sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL); - - sync->guest_page_size = data->vm->page_size; if (tdata->prepare && !tdata->prepare(data, sync, maxslots)) { ret = false; diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c index 3fd81e58f40c..8e4b94d7b8dd 100644 --- a/tools/testing/selftests/kvm/s390x/memop.c +++ b/tools/testing/selftests/kvm/s390x/memop.c @@ -9,6 +9,7 @@ #include <stdlib.h> #include <string.h> #include <sys/ioctl.h> +#include <pthread.h> #include <linux/bits.h> @@ -26,6 +27,7 @@ enum mop_target { enum mop_access_mode { READ, WRITE, + CMPXCHG, }; struct mop_desc { @@ -44,57 +46,67 @@ struct mop_desc { enum mop_access_mode mode; void *buf; uint32_t sida_offset; + void *old; + uint8_t old_value[16]; + bool *cmpxchg_success; uint8_t ar; uint8_t key; }; -static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc desc) +const uint8_t NO_KEY = 0xff; + +static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc *desc) { struct kvm_s390_mem_op ksmo = { - .gaddr = (uintptr_t)desc.gaddr, - .size = desc.size, - .buf = ((uintptr_t)desc.buf), + .gaddr = (uintptr_t)desc->gaddr, + .size = desc->size, + .buf = 
((uintptr_t)desc->buf), .reserved = "ignored_ignored_ignored_ignored" }; - switch (desc.target) { + switch (desc->target) { case LOGICAL: - if (desc.mode == READ) + if (desc->mode == READ) ksmo.op = KVM_S390_MEMOP_LOGICAL_READ; - if (desc.mode == WRITE) + if (desc->mode == WRITE) ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE; break; case SIDA: - if (desc.mode == READ) + if (desc->mode == READ) ksmo.op = KVM_S390_MEMOP_SIDA_READ; - if (desc.mode == WRITE) + if (desc->mode == WRITE) ksmo.op = KVM_S390_MEMOP_SIDA_WRITE; break; case ABSOLUTE: - if (desc.mode == READ) + if (desc->mode == READ) ksmo.op = KVM_S390_MEMOP_ABSOLUTE_READ; - if (desc.mode == WRITE) + if (desc->mode == WRITE) ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE; + if (desc->mode == CMPXCHG) { + ksmo.op = KVM_S390_MEMOP_ABSOLUTE_CMPXCHG; + ksmo.old_addr = (uint64_t)desc->old; + memcpy(desc->old_value, desc->old, desc->size); + } break; case INVALID: ksmo.op = -1; } - if (desc.f_check) + if (desc->f_check) ksmo.flags |= KVM_S390_MEMOP_F_CHECK_ONLY; - if (desc.f_inject) + if (desc->f_inject) ksmo.flags |= KVM_S390_MEMOP_F_INJECT_EXCEPTION; - if (desc._set_flags) - ksmo.flags = desc.set_flags; - if (desc.f_key) { + if (desc->_set_flags) + ksmo.flags = desc->set_flags; + if (desc->f_key && desc->key != NO_KEY) { ksmo.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION; - ksmo.key = desc.key; + ksmo.key = desc->key; } - if (desc._ar) - ksmo.ar = desc.ar; + if (desc->_ar) + ksmo.ar = desc->ar; else ksmo.ar = 0; - if (desc._sida_offset) - ksmo.sida_offset = desc.sida_offset; + if (desc->_sida_offset) + ksmo.sida_offset = desc->sida_offset; return ksmo; } @@ -133,9 +145,13 @@ static void print_memop(struct kvm_vcpu *vcpu, const struct kvm_s390_mem_op *ksm case KVM_S390_MEMOP_ABSOLUTE_WRITE: printf("ABSOLUTE, WRITE, "); break; + case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG: + printf("ABSOLUTE, CMPXCHG, "); + break; } - printf("gaddr=%llu, size=%u, buf=%llu, ar=%u, key=%u", - ksmo->gaddr, ksmo->size, ksmo->buf, ksmo->ar, ksmo->key); + printf("gaddr=%llu, size=%u, buf=%llu, ar=%u, key=%u, old_addr=%llx", + ksmo->gaddr, ksmo->size, ksmo->buf, ksmo->ar, ksmo->key, + ksmo->old_addr); if (ksmo->flags & KVM_S390_MEMOP_F_CHECK_ONLY) printf(", CHECK_ONLY"); if (ksmo->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) @@ -145,24 +161,30 @@ static void print_memop(struct kvm_vcpu *vcpu, const struct kvm_s390_mem_op *ksm puts(")"); } -static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo) +static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo, + struct mop_desc *desc) { struct kvm_vcpu *vcpu = info.vcpu; if (!vcpu) - vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo); + return __vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo); else - vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo); + return __vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo); } -static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo) +static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo, + struct mop_desc *desc) { - struct kvm_vcpu *vcpu = info.vcpu; + int r; - if (!vcpu) - return __vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo); - else - return __vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo); + r = err_memop_ioctl(info, ksmo, desc); + if (ksmo->op == KVM_S390_MEMOP_ABSOLUTE_CMPXCHG) { + if (desc->cmpxchg_success) { + int diff = memcmp(desc->old_value, desc->old, desc->size); + *desc->cmpxchg_success = !diff; + } + } + TEST_ASSERT(!r, __KVM_IOCTL_ERROR("KVM_S390_MEM_OP", r)); } #define MEMOP(err, info_p, mop_target_p, access_mode_p, buf_p, size_p, ...) 
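At the ioctl level, the new compare-and-swap memop carries the replacement value in buf and the expected old value via old_addr; the kernel writes the actual old value back there, which is why ksmo_from_desc() snapshots it first and memop_ioctl() infers success by comparing the snapshot afterwards. A hedged sketch of the same exchange done with a raw ioctl on the VM fd, assuming uapi headers new enough to declare KVM_S390_MEMOP_ABSOLUTE_CMPXCHG and old_addr, with error handling elided:

#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static bool absolute_cmpxchg(int vm_fd, uint64_t gaddr, void *new_val,
			     void *expected, uint32_t size)
{
	uint8_t snapshot[16];
	struct kvm_s390_mem_op ksmo = {
		.gaddr = gaddr,
		.size = size,			/* power of two, <= 16 */
		.buf = (uintptr_t)new_val,
		.old_addr = (uintptr_t)expected,
		.op = KVM_S390_MEMOP_ABSOLUTE_CMPXCHG,
	};

	memcpy(snapshot, expected, size);	/* keep the expected value */
	if (ioctl(vm_fd, KVM_S390_MEM_OP, &ksmo))
		return false;			/* ioctl-level failure */
	/* the swap happened iff the kernel left *expected untouched */
	return !memcmp(snapshot, expected, size);
}
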
\ @@ -183,9 +205,9 @@ static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo) else \ __desc.gaddr = __desc.gaddr_v; \ } \ - __ksmo = ksmo_from_desc(__desc); \ + __ksmo = ksmo_from_desc(&__desc); \ print_memop(__info.vcpu, &__ksmo); \ - err##memop_ioctl(__info, &__ksmo); \ + err##memop_ioctl(__info, &__ksmo, &__desc); \ }) #define MOP(...) MEMOP(, __VA_ARGS__) @@ -199,6 +221,8 @@ static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo) #define AR(a) ._ar = 1, .ar = (a) #define KEY(a) .f_key = 1, .key = (a) #define INJECT .f_inject = 1 +#define CMPXCHG_OLD(o) .old = (o) +#define CMPXCHG_SUCCESS(s) .cmpxchg_success = (s) #define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); }) @@ -208,8 +232,8 @@ static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo) #define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38)) #define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39)) -static uint8_t mem1[65536]; -static uint8_t mem2[65536]; +static uint8_t __aligned(PAGE_SIZE) mem1[65536]; +static uint8_t __aligned(PAGE_SIZE) mem2[65536]; struct test_default { struct kvm_vm *kvm_vm; @@ -241,6 +265,8 @@ enum stage { STAGE_SKEYS_SET, /* Guest copied memory (locations up to test case) */ STAGE_COPIED, + /* End of guest code reached */ + STAGE_DONE, }; #define HOST_SYNC(info_p, stage) \ @@ -252,6 +278,9 @@ enum stage { \ vcpu_run(__vcpu); \ get_ucall(__vcpu, &uc); \ + if (uc.cmd == UCALL_ABORT) { \ + REPORT_GUEST_ASSERT_2(uc, "hints: %lu, %lu"); \ + } \ ASSERT_EQ(uc.cmd, UCALL_SYNC); \ ASSERT_EQ(uc.args[1], __stage); \ }) \ @@ -268,34 +297,66 @@ static void prepare_mem12(void) #define ASSERT_MEM_EQ(p1, p2, size) \ TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!") -#define DEFAULT_WRITE_READ(copy_cpu, mop_cpu, mop_target_p, size, ...) \ -({ \ - struct test_info __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \ - enum mop_target __target = (mop_target_p); \ - uint32_t __size = (size); \ - \ - prepare_mem12(); \ - CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size, \ - GADDR_V(mem1), ##__VA_ARGS__); \ - HOST_SYNC(__copy_cpu, STAGE_COPIED); \ - CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, \ - GADDR_V(mem2), ##__VA_ARGS__); \ - ASSERT_MEM_EQ(mem1, mem2, __size); \ -}) +static void default_write_read(struct test_info copy_cpu, struct test_info mop_cpu, + enum mop_target mop_target, uint32_t size, uint8_t key) +{ + prepare_mem12(); + CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, + GADDR_V(mem1), KEY(key)); + HOST_SYNC(copy_cpu, STAGE_COPIED); + CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size, + GADDR_V(mem2), KEY(key)); + ASSERT_MEM_EQ(mem1, mem2, size); +} -#define DEFAULT_READ(copy_cpu, mop_cpu, mop_target_p, size, ...) 
\ -({ \ - struct test_info __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \ - enum mop_target __target = (mop_target_p); \ - uint32_t __size = (size); \ - \ - prepare_mem12(); \ - CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size, \ - GADDR_V(mem1)); \ - HOST_SYNC(__copy_cpu, STAGE_COPIED); \ - CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, ##__VA_ARGS__);\ - ASSERT_MEM_EQ(mem1, mem2, __size); \ -}) +static void default_read(struct test_info copy_cpu, struct test_info mop_cpu, + enum mop_target mop_target, uint32_t size, uint8_t key) +{ + prepare_mem12(); + CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, GADDR_V(mem1)); + HOST_SYNC(copy_cpu, STAGE_COPIED); + CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size, + GADDR_V(mem2), KEY(key)); + ASSERT_MEM_EQ(mem1, mem2, size); +} + +static void default_cmpxchg(struct test_default *test, uint8_t key) +{ + for (int size = 1; size <= 16; size *= 2) { + for (int offset = 0; offset < 16; offset += size) { + uint8_t __aligned(16) new[16] = {}; + uint8_t __aligned(16) old[16]; + bool succ; + + prepare_mem12(); + default_write_read(test->vcpu, test->vcpu, LOGICAL, 16, NO_KEY); + + memcpy(&old, mem1, 16); + MOP(test->vm, ABSOLUTE, CMPXCHG, new + offset, + size, GADDR_V(mem1 + offset), + CMPXCHG_OLD(old + offset), + CMPXCHG_SUCCESS(&succ), KEY(key)); + HOST_SYNC(test->vcpu, STAGE_COPIED); + MOP(test->vm, ABSOLUTE, READ, mem2, 16, GADDR_V(mem2)); + TEST_ASSERT(succ, "exchange of values should succeed"); + memcpy(mem1 + offset, new + offset, size); + ASSERT_MEM_EQ(mem1, mem2, 16); + + memcpy(&old, mem1, 16); + new[offset]++; + old[offset]++; + MOP(test->vm, ABSOLUTE, CMPXCHG, new + offset, + size, GADDR_V(mem1 + offset), + CMPXCHG_OLD(old + offset), + CMPXCHG_SUCCESS(&succ), KEY(key)); + HOST_SYNC(test->vcpu, STAGE_COPIED); + MOP(test->vm, ABSOLUTE, READ, mem2, 16, GADDR_V(mem2)); + TEST_ASSERT(!succ, "exchange of values should not succeed"); + ASSERT_MEM_EQ(mem1, mem2, 16); + ASSERT_MEM_EQ(&old, mem1, 16); + } + } +} static void guest_copy(void) { @@ -310,7 +371,7 @@ static void test_copy(void) HOST_SYNC(t.vcpu, STAGE_INITED); - DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size); + default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, NO_KEY); kvm_vm_free(t.kvm_vm); } @@ -357,26 +418,268 @@ static void test_copy_key(void) HOST_SYNC(t.vcpu, STAGE_SKEYS_SET); /* vm, no key */ - DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size); + default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, NO_KEY); /* vm/vcpu, machting key or key 0 */ - DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(0)); - DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(9)); - DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(0)); - DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(9)); + default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 0); + default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 9); + default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, 0); + default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, 9); /* * There used to be different code paths for key handling depending on * if the region crossed a page boundary. * There currently are not, but the more tests the merrier. 
*/ - DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(0)); - DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(9)); - DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(0)); - DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(9)); + default_write_read(t.vcpu, t.vcpu, LOGICAL, 1, 0); + default_write_read(t.vcpu, t.vcpu, LOGICAL, 1, 9); + default_write_read(t.vcpu, t.vm, ABSOLUTE, 1, 0); + default_write_read(t.vcpu, t.vm, ABSOLUTE, 1, 9); /* vm/vcpu, mismatching keys on read, but no fetch protection */ - DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(2)); - DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem1), KEY(2)); + default_read(t.vcpu, t.vcpu, LOGICAL, t.size, 2); + default_read(t.vcpu, t.vm, ABSOLUTE, t.size, 2); + + kvm_vm_free(t.kvm_vm); +} + +static void test_cmpxchg_key(void) +{ + struct test_default t = test_default_init(guest_copy_key); + + HOST_SYNC(t.vcpu, STAGE_SKEYS_SET); + + default_cmpxchg(&t, NO_KEY); + default_cmpxchg(&t, 0); + default_cmpxchg(&t, 9); + + kvm_vm_free(t.kvm_vm); +} + +static __uint128_t cut_to_size(int size, __uint128_t val) +{ + switch (size) { + case 1: + return (uint8_t)val; + case 2: + return (uint16_t)val; + case 4: + return (uint32_t)val; + case 8: + return (uint64_t)val; + case 16: + return val; + } + GUEST_ASSERT_1(false, "Invalid size"); + return 0; +} + +static bool popcount_eq(__uint128_t a, __uint128_t b) +{ + unsigned int count_a, count_b; + + count_a = __builtin_popcountl((uint64_t)(a >> 64)) + + __builtin_popcountl((uint64_t)a); + count_b = __builtin_popcountl((uint64_t)(b >> 64)) + + __builtin_popcountl((uint64_t)b); + return count_a == count_b; +} + +static __uint128_t rotate(int size, __uint128_t val, int amount) +{ + unsigned int bits = size * 8; + + amount = (amount + bits) % bits; + val = cut_to_size(size, val); + return (val << (bits - amount)) | (val >> amount); +} + +const unsigned int max_block = 16; + +static void choose_block(bool guest, int i, int *size, int *offset) +{ + unsigned int rand; + + rand = i; + if (guest) { + rand = rand * 19 + 11; + *size = 1 << ((rand % 3) + 2); + rand = rand * 19 + 11; + *offset = (rand % max_block) & ~(*size - 1); + } else { + rand = rand * 17 + 5; + *size = 1 << (rand % 5); + rand = rand * 17 + 5; + *offset = (rand % max_block) & ~(*size - 1); + } +} + +static __uint128_t permutate_bits(bool guest, int i, int size, __uint128_t old) +{ + unsigned int rand; + int amount; + bool swap; + + rand = i; + rand = rand * 3 + 1; + if (guest) + rand = rand * 3 + 1; + swap = rand % 2 == 0; + if (swap) { + int i, j; + __uint128_t new; + uint8_t byte0, byte1; + + rand = rand * 3 + 1; + i = rand % size; + rand = rand * 3 + 1; + j = rand % size; + if (i == j) + return old; + new = rotate(16, old, i * 8); + byte0 = new & 0xff; + new &= ~0xff; + new = rotate(16, new, -i * 8); + new = rotate(16, new, j * 8); + byte1 = new & 0xff; + new = (new & ~0xff) | byte0; + new = rotate(16, new, -j * 8); + new = rotate(16, new, i * 8); + new = new | byte1; + new = rotate(16, new, -i * 8); + return new; + } + rand = rand * 3 + 1; + amount = rand % (size * 8); + return rotate(size, old, amount); +} + +static bool _cmpxchg(int size, void *target, __uint128_t *old_addr, __uint128_t new) +{ + bool ret; + + switch (size) { + case 4: { + uint32_t old = *old_addr; + + asm volatile ("cs %[old],%[new],%[address]" + : [old] "+d" (old), + [address] "+Q" (*(uint32_t *)(target)) + : [new] "d" ((uint32_t)new) + : "cc" + ); + ret = old == (uint32_t)*old_addr; + *old_addr = old; + return ret; + } + case 8: { + 
uint64_t old = *old_addr; + + asm volatile ("csg %[old],%[new],%[address]" + : [old] "+d" (old), + [address] "+Q" (*(uint64_t *)(target)) + : [new] "d" ((uint64_t)new) + : "cc" + ); + ret = old == (uint64_t)*old_addr; + *old_addr = old; + return ret; + } + case 16: { + __uint128_t old = *old_addr; + + asm volatile ("cdsg %[old],%[new],%[address]" + : [old] "+d" (old), + [address] "+Q" (*(__uint128_t *)(target)) + : [new] "d" (new) + : "cc" + ); + ret = old == *old_addr; + *old_addr = old; + return ret; + } + } + GUEST_ASSERT_1(false, "Invalid size"); + return 0; +} + +const unsigned int cmpxchg_iter_outer = 100, cmpxchg_iter_inner = 10000; + +static void guest_cmpxchg_key(void) +{ + int size, offset; + __uint128_t old, new; + + set_storage_key_range(mem1, max_block, 0x10); + set_storage_key_range(mem2, max_block, 0x10); + GUEST_SYNC(STAGE_SKEYS_SET); + + for (int i = 0; i < cmpxchg_iter_outer; i++) { + do { + old = 1; + } while (!_cmpxchg(16, mem1, &old, 0)); + for (int j = 0; j < cmpxchg_iter_inner; j++) { + choose_block(true, i + j, &size, &offset); + do { + new = permutate_bits(true, i + j, size, old); + } while (!_cmpxchg(size, mem2 + offset, &old, new)); + } + } + + GUEST_SYNC(STAGE_DONE); +} + +static void *run_guest(void *data) +{ + struct test_info *info = data; + + HOST_SYNC(*info, STAGE_DONE); + return NULL; +} + +static char *quad_to_char(__uint128_t *quad, int size) +{ + return ((char *)quad) + (sizeof(*quad) - size); +} + +static void test_cmpxchg_key_concurrent(void) +{ + struct test_default t = test_default_init(guest_cmpxchg_key); + int size, offset; + __uint128_t old, new; + bool success; + pthread_t thread; + + HOST_SYNC(t.vcpu, STAGE_SKEYS_SET); + prepare_mem12(); + MOP(t.vcpu, LOGICAL, WRITE, mem1, max_block, GADDR_V(mem2)); + pthread_create(&thread, NULL, run_guest, &t.vcpu); + + for (int i = 0; i < cmpxchg_iter_outer; i++) { + do { + old = 0; + new = 1; + MOP(t.vm, ABSOLUTE, CMPXCHG, &new, + sizeof(new), GADDR_V(mem1), + CMPXCHG_OLD(&old), + CMPXCHG_SUCCESS(&success), KEY(1)); + } while (!success); + for (int j = 0; j < cmpxchg_iter_inner; j++) { + choose_block(false, i + j, &size, &offset); + do { + new = permutate_bits(false, i + j, size, old); + MOP(t.vm, ABSOLUTE, CMPXCHG, quad_to_char(&new, size), + size, GADDR_V(mem2 + offset), + CMPXCHG_OLD(quad_to_char(&old, size)), + CMPXCHG_SUCCESS(&success), KEY(1)); + } while (!success); + } + } + + pthread_join(thread, NULL); + + MOP(t.vcpu, LOGICAL, READ, mem2, max_block, GADDR_V(mem2)); + TEST_ASSERT(popcount_eq(*(__uint128_t *)mem1, *(__uint128_t *)mem2), + "Must retain number of set bits"); kvm_vm_free(t.kvm_vm); } @@ -409,7 +712,7 @@ static void test_copy_key_storage_prot_override(void) HOST_SYNC(t.vcpu, STAGE_SKEYS_SET); /* vcpu, mismatching keys, storage protection override in effect */ - DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(2)); + default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 2); kvm_vm_free(t.kvm_vm); } @@ -422,8 +725,8 @@ static void test_copy_key_fetch_prot(void) HOST_SYNC(t.vcpu, STAGE_SKEYS_SET); /* vm/vcpu, matching key, fetch protection in effect */ - DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(9)); - DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem2), KEY(9)); + default_read(t.vcpu, t.vcpu, LOGICAL, t.size, 9); + default_read(t.vcpu, t.vm, ABSOLUTE, t.size, 9); kvm_vm_free(t.kvm_vm); } @@ -454,9 +757,27 @@ static void test_errors_key(void) /* vm/vcpu, mismatching keys, fetch protection in effect */ CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, WRITE, mem1, 
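The guest-side _cmpxchg() above hand-picks the CS, CSG or CDSG instruction by operand size; its contract (attempt the exchange, report success, and rewrite the expected value with the current memory contents on failure) is the same one the GCC atomic builtin provides. Shown here purely as a portable point of comparison, since the test must execute the real instructions inside the guest:

#include <stdbool.h>
#include <stdint.h>

static bool cmpxchg64(uint64_t *target, uint64_t *old, uint64_t new_val)
{
	/*
	 * On failure, *old is overwritten with the current value of
	 * *target -- the same convention CS/CSG/CDSG follow above.
	 */
	return __atomic_compare_exchange_n(target, old, new_val, false,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
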
t.size, GADDR_V(mem1), KEY(2)); - CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem2), KEY(2)); + CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem1), KEY(2)); CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2)); - CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2)); + CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem1), KEY(2)); + + kvm_vm_free(t.kvm_vm); +} + +static void test_errors_cmpxchg_key(void) +{ + struct test_default t = test_default_init(guest_copy_key_fetch_prot); + int i; + + HOST_SYNC(t.vcpu, STAGE_INITED); + HOST_SYNC(t.vcpu, STAGE_SKEYS_SET); + + for (i = 1; i <= 16; i *= 2) { + __uint128_t old = 0; + + ERR_PROT_MOP(t.vm, ABSOLUTE, CMPXCHG, mem2, i, GADDR_V(mem2), + CMPXCHG_OLD(&old), KEY(2)); + } kvm_vm_free(t.kvm_vm); } @@ -518,7 +839,7 @@ static void guest_copy_key_fetch_prot_override(void) GUEST_SYNC(STAGE_INITED); set_storage_key_range(0, PAGE_SIZE, 0x18); set_storage_key_range((void *)last_page_addr, PAGE_SIZE, 0x0); - asm volatile ("sske %[key],%[addr]\n" :: [addr] "r"(0), [key] "r"(0x18) : "cc"); + asm volatile ("sske %[key],%[addr]\n" :: [addr] "r"(0L), [key] "r"(0x18) : "cc"); GUEST_SYNC(STAGE_SKEYS_SET); for (;;) { @@ -606,7 +927,7 @@ static void test_errors_key_fetch_prot_override_enabled(void) /* * vcpu, mismatching keys on fetch, - * fetch protection override does not apply because memory range acceeded + * fetch protection override does not apply because memory range exceeded */ CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048 + 1, GADDR_V(0), KEY(2)); CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048 + 1, @@ -645,7 +966,9 @@ static void _test_errors_common(struct test_info info, enum mop_target target, i /* Bad guest address: */ rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL), CHECK_ONLY); - TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory access"); + TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory address with CHECK_ONLY"); + rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL)); + TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory address on write"); /* Bad host address: */ rv = ERR_MOP(info, target, WRITE, 0, size, GADDR_V(mem1)); @@ -694,85 +1017,138 @@ static void test_errors(void) kvm_vm_free(t.kvm_vm); } -struct testdef { - const char *name; - void (*test)(void); - int extension; -} testlist[] = { - { - .name = "simple copy", - .test = test_copy, - }, - { - .name = "generic error checks", - .test = test_errors, - }, - { - .name = "copy with storage keys", - .test = test_copy_key, - .extension = 1, - }, - { - .name = "copy with key storage protection override", - .test = test_copy_key_storage_prot_override, - .extension = 1, - }, - { - .name = "copy with key fetch protection", - .test = test_copy_key_fetch_prot, - .extension = 1, - }, - { - .name = "copy with key fetch protection override", - .test = test_copy_key_fetch_prot_override, - .extension = 1, - }, - { - .name = "error checks with key", - .test = test_errors_key, - .extension = 1, - }, - { - .name = "termination", - .test = test_termination, - .extension = 1, - }, - { - .name = "error checks with key storage protection override", - .test = test_errors_key_storage_prot_override, - .extension = 1, - }, - { - .name = "error checks without key fetch prot override", - .test = test_errors_key_fetch_prot_override_not_enabled, - .extension = 1, - }, - { - .name = 
"error checks with key fetch prot override", - .test = test_errors_key_fetch_prot_override_enabled, - .extension = 1, - }, -}; +static void test_errors_cmpxchg(void) +{ + struct test_default t = test_default_init(guest_idle); + __uint128_t old; + int rv, i, power = 1; + + HOST_SYNC(t.vcpu, STAGE_INITED); + + for (i = 0; i < 32; i++) { + if (i == power) { + power *= 2; + continue; + } + rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR_V(mem1), + CMPXCHG_OLD(&old)); + TEST_ASSERT(rv == -1 && errno == EINVAL, + "ioctl allows bad size for cmpxchg"); + } + for (i = 1; i <= 16; i *= 2) { + rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR((void *)~0xfffUL), + CMPXCHG_OLD(&old)); + TEST_ASSERT(rv > 0, "ioctl allows bad guest address for cmpxchg"); + } + for (i = 2; i <= 16; i *= 2) { + rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR_V(mem1 + 1), + CMPXCHG_OLD(&old)); + TEST_ASSERT(rv == -1 && errno == EINVAL, + "ioctl allows bad alignment for cmpxchg"); + } + + kvm_vm_free(t.kvm_vm); +} int main(int argc, char *argv[]) { int extension_cap, idx; TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_MEM_OP)); + extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION); - ksft_print_header(); + struct testdef { + const char *name; + void (*test)(void); + bool requirements_met; + } testlist[] = { + { + .name = "simple copy", + .test = test_copy, + .requirements_met = true, + }, + { + .name = "generic error checks", + .test = test_errors, + .requirements_met = true, + }, + { + .name = "copy with storage keys", + .test = test_copy_key, + .requirements_met = extension_cap > 0, + }, + { + .name = "cmpxchg with storage keys", + .test = test_cmpxchg_key, + .requirements_met = extension_cap & 0x2, + }, + { + .name = "concurrently cmpxchg with storage keys", + .test = test_cmpxchg_key_concurrent, + .requirements_met = extension_cap & 0x2, + }, + { + .name = "copy with key storage protection override", + .test = test_copy_key_storage_prot_override, + .requirements_met = extension_cap > 0, + }, + { + .name = "copy with key fetch protection", + .test = test_copy_key_fetch_prot, + .requirements_met = extension_cap > 0, + }, + { + .name = "copy with key fetch protection override", + .test = test_copy_key_fetch_prot_override, + .requirements_met = extension_cap > 0, + }, + { + .name = "error checks with key", + .test = test_errors_key, + .requirements_met = extension_cap > 0, + }, + { + .name = "error checks for cmpxchg with key", + .test = test_errors_cmpxchg_key, + .requirements_met = extension_cap & 0x2, + }, + { + .name = "error checks for cmpxchg", + .test = test_errors_cmpxchg, + .requirements_met = extension_cap & 0x2, + }, + { + .name = "termination", + .test = test_termination, + .requirements_met = extension_cap > 0, + }, + { + .name = "error checks with key storage protection override", + .test = test_errors_key_storage_prot_override, + .requirements_met = extension_cap > 0, + }, + { + .name = "error checks without key fetch prot override", + .test = test_errors_key_fetch_prot_override_not_enabled, + .requirements_met = extension_cap > 0, + }, + { + .name = "error checks with key fetch prot override", + .test = test_errors_key_fetch_prot_override_enabled, + .requirements_met = extension_cap > 0, + }, + }; + ksft_print_header(); ksft_set_plan(ARRAY_SIZE(testlist)); - extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION); for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) { - if (extension_cap >= testlist[idx].extension) { + if (testlist[idx].requirements_met) { testlist[idx].test(); 
ksft_test_result_pass("%s\n", testlist[idx].name); } else { - ksft_test_result_skip("%s - extension level %d not supported\n", - testlist[idx].name, - testlist[idx].extension); + ksft_test_result_skip("%s - requirements not met (kernel has extension cap %#x)\n", + testlist[idx].name, extension_cap); } } diff --git a/tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c b/tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c index 37c61f712fd5..e334844d6e1d 100644 --- a/tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c +++ b/tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c @@ -26,9 +26,6 @@ int main(int argc, char *argv[]) struct kvm_vcpu *vcpu; struct kvm_vm *vm; - /* Tell stdout not to buffer its content */ - setbuf(stdout, NULL); - TEST_REQUIRE(kvm_has_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE)); vm = vm_create_with_one_vcpu(&vcpu, guest_code); diff --git a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c index 32f7e09ef67c..0f728f05ea82 100644 --- a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c +++ b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c @@ -48,10 +48,10 @@ static void guest_main(void) const uint8_t *other_hypercall_insn; uint64_t ret; - if (is_intel_cpu()) { + if (host_cpu_is_intel) { native_hypercall_insn = vmx_vmcall; other_hypercall_insn = svm_vmmcall; - } else if (is_amd_cpu()) { + } else if (host_cpu_is_amd) { native_hypercall_insn = svm_vmmcall; other_hypercall_insn = vmx_vmcall; } else { diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c index d576bc8ce823..2ee0af0d449e 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c @@ -104,7 +104,7 @@ static void guest_main(struct ms_hyperv_tsc_page *tsc_page, vm_paddr_t tsc_page_ /* Set Guest OS id to enable Hyper-V emulation */ GUEST_SYNC(1); - wrmsr(HV_X64_MSR_GUEST_OS_ID, (u64)0x8100 << 48); + wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID); GUEST_SYNC(2); check_tsc_msr_rdtsc(); diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c b/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c new file mode 100644 index 000000000000..73af44d2167f --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Test Hyper-V extended hypercall, HV_EXT_CALL_QUERY_CAPABILITIES (0x8001), + * exit to userspace and receive result in guest. 
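The reworked testlist above keys each entry off a precomputed requirements_met flag instead of a single minimum extension level, because KVM_CAP_S390_MEM_OP_EXTENSION is now treated as a small bitmask: any non-zero value advertises the storage-key extension, while bit 0x2 specifically advertises cmpxchg support. A hedged sketch of the two probes, with the /dev/kvm fd plumbing elided:

#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static bool memop_keys_supported(int kvm_fd)
{
	/* any non-zero extension level includes storage-key support */
	return ioctl(kvm_fd, KVM_CHECK_EXTENSION,
		     KVM_CAP_S390_MEM_OP_EXTENSION) > 0;
}

static bool memop_cmpxchg_supported(int kvm_fd)
{
	/* cmpxchg support is a flag bit on top of the level */
	return ioctl(kvm_fd, KVM_CHECK_EXTENSION,
		     KVM_CAP_S390_MEM_OP_EXTENSION) & 0x2;
}
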
+ * + * Negative tests are present in hyperv_features.c + * + * Copyright 2022 Google LLC + * Author: Vipin Sharma <vipinsh@google.com> + */ + +#include "kvm_util.h" +#include "processor.h" +#include "hyperv.h" + +/* Any value is fine */ +#define EXT_CAPABILITIES 0xbull + +static void guest_code(vm_paddr_t in_pg_gpa, vm_paddr_t out_pg_gpa, + vm_vaddr_t out_pg_gva) +{ + uint64_t *output_gva; + + wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID); + wrmsr(HV_X64_MSR_HYPERCALL, in_pg_gpa); + + output_gva = (uint64_t *)out_pg_gva; + + hyperv_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, in_pg_gpa, out_pg_gpa); + + /* TLFS states output will be a uint64_t value */ + GUEST_ASSERT_EQ(*output_gva, EXT_CAPABILITIES); + + GUEST_DONE(); +} + +int main(void) +{ + vm_vaddr_t hcall_out_page; + vm_vaddr_t hcall_in_page; + struct kvm_vcpu *vcpu; + struct kvm_run *run; + struct kvm_vm *vm; + uint64_t *outval; + struct ucall uc; + + /* Verify if extended hypercalls are supported */ + if (!kvm_cpuid_has(kvm_get_supported_hv_cpuid(), + HV_ENABLE_EXTENDED_HYPERCALLS)) { + print_skip("Extended calls not supported by the kernel"); + exit(KSFT_SKIP); + } + + vm = vm_create_with_one_vcpu(&vcpu, guest_code); + run = vcpu->run; + vcpu_set_hv_cpuid(vcpu); + + /* Hypercall input */ + hcall_in_page = vm_vaddr_alloc_pages(vm, 1); + memset(addr_gva2hva(vm, hcall_in_page), 0x0, vm->page_size); + + /* Hypercall output */ + hcall_out_page = vm_vaddr_alloc_pages(vm, 1); + memset(addr_gva2hva(vm, hcall_out_page), 0x0, vm->page_size); + + vcpu_args_set(vcpu, 3, addr_gva2gpa(vm, hcall_in_page), + addr_gva2gpa(vm, hcall_out_page), hcall_out_page); + + vcpu_run(vcpu); + + TEST_ASSERT(run->exit_reason == KVM_EXIT_HYPERV, + "Unexpected exit reason: %u (%s)", + run->exit_reason, exit_reason_str(run->exit_reason)); + + outval = addr_gpa2hva(vm, run->hyperv.u.hcall.params[1]); + *outval = EXT_CAPABILITIES; + run->hyperv.u.hcall.result = HV_STATUS_SUCCESS; + + vcpu_run(vcpu); + + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, + "Unexpected exit reason: %u (%s)", + run->exit_reason, exit_reason_str(run->exit_reason)); + + switch (get_ucall(vcpu, &uc)) { + case UCALL_ABORT: + REPORT_GUEST_ASSERT_2(uc, "arg1 = %ld, arg2 = %ld"); + break; + case UCALL_DONE: + break; + default: + TEST_FAIL("Unhandled ucall: %ld", uc.cmd); + } + + kvm_vm_free(vm); + return 0; +} diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c index 3163c3e8db0a..c5e3b39edd07 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c @@ -13,9 +13,17 @@ #include "processor.h" #include "hyperv.h" +/* + * HYPERV_CPUID_ENLIGHTMENT_INFO.EBX is not a 'feature' CPUID leaf + * but to activate the feature it is sufficient to set it to a non-zero + * value. Use BIT(0) for that. 
+ */ +#define HV_PV_SPINLOCKS_TEST \ + KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EBX, 0) + struct msr_data { uint32_t idx; - bool available; + bool fault_expected; bool write; u64 write_val; }; @@ -26,22 +34,46 @@ struct hcall_data { bool ud_expected; }; +static bool is_write_only_msr(uint32_t msr) +{ + return msr == HV_X64_MSR_EOI; +} + static void guest_msr(struct msr_data *msr) { - uint64_t ignored; - uint8_t vector; + uint8_t vector = 0; + uint64_t msr_val = 0; GUEST_ASSERT(msr->idx); - if (!msr->write) - vector = rdmsr_safe(msr->idx, &ignored); - else + if (msr->write) vector = wrmsr_safe(msr->idx, msr->write_val); - if (msr->available) - GUEST_ASSERT_2(!vector, msr->idx, vector); + if (!vector && (!msr->write || !is_write_only_msr(msr->idx))) + vector = rdmsr_safe(msr->idx, &msr_val); + + if (msr->fault_expected) + GUEST_ASSERT_3(vector == GP_VECTOR, msr->idx, vector, GP_VECTOR); else - GUEST_ASSERT_2(vector == GP_VECTOR, msr->idx, vector); + GUEST_ASSERT_3(!vector, msr->idx, vector, 0); + + if (vector || is_write_only_msr(msr->idx)) + goto done; + + if (msr->write) + GUEST_ASSERT_3(msr_val == msr->write_val, msr->idx, + msr_val, msr->write_val); + + /* Invariant TSC bit appears when TSC invariant control MSR is written to */ + if (msr->idx == HV_X64_MSR_TSC_INVARIANT_CONTROL) { + if (!this_cpu_has(HV_ACCESS_TSC_INVARIANT)) + GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC)); + else + GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC) == + !!(msr_val & HV_INVARIANT_TSC_EXPOSED)); + } + +done: GUEST_DONE(); } @@ -89,7 +121,6 @@ static void vcpu_reset_hv_cpuid(struct kvm_vcpu *vcpu) static void guest_test_msrs_access(void) { struct kvm_cpuid2 *prev_cpuid = NULL; - struct kvm_cpuid_entry2 *feat, *dbg; struct kvm_vcpu *vcpu; struct kvm_run *run; struct kvm_vm *vm; @@ -97,6 +128,7 @@ static void guest_test_msrs_access(void) int stage = 0; vm_vaddr_t msr_gva; struct msr_data *msr; + bool has_invtsc = kvm_cpu_has(X86_FEATURE_INVTSC); while (true) { vm = vm_create_with_one_vcpu(&vcpu, guest_msr); @@ -116,9 +148,6 @@ static void guest_test_msrs_access(void) vcpu_init_cpuid(vcpu, prev_cpuid); } - feat = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES); - dbg = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES); - vm_init_descriptor_tables(vm); vcpu_init_descriptor_tables(vcpu); @@ -134,133 +163,139 @@ static void guest_test_msrs_access(void) * Only available when Hyper-V identification is set */ msr->idx = HV_X64_MSR_GUEST_OS_ID; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 1: msr->idx = HV_X64_MSR_HYPERCALL; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 2: - feat->eax |= HV_MSR_HYPERCALL_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE); /* * HV_X64_MSR_GUEST_OS_ID has to be written first to make * HV_X64_MSR_HYPERCALL available. 
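The invariant-TSC additions to guest_msr() encode a small contract: on a host with INVTSC (which these stages require), a guest without HV_ACCESS_TSC_INVARIANT must always see the hardware INVTSC bit, while a guest with that privilege sees INVTSC only once HV_INVARIANT_TSC_EXPOSED (bit 0) has been written to HV_X64_MSR_TSC_INVARIANT_CONTROL. A condensed, purely illustrative restatement of that check as a predicate; the real test reads CPUID and the MSR directly:

#include <stdbool.h>

static bool invtsc_visibility_ok(bool has_hv_access_tsc_invariant,
				 bool sees_invtsc, bool msr_bit0_set)
{
	if (!has_hv_access_tsc_invariant)
		return sees_invtsc;	/* CPUID bit is never filtered */
	/* HV_INVARIANT_TSC_EXPOSED gates the CPUID bit */
	return sees_invtsc == msr_bit0_set;
}
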
*/ msr->idx = HV_X64_MSR_GUEST_OS_ID; - msr->write = 1; + msr->write = true; msr->write_val = HYPERV_LINUX_OS_ID; - msr->available = 1; + msr->fault_expected = false; break; case 3: msr->idx = HV_X64_MSR_GUEST_OS_ID; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 4: msr->idx = HV_X64_MSR_HYPERCALL; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 5: msr->idx = HV_X64_MSR_VP_RUNTIME; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 6: - feat->eax |= HV_MSR_VP_RUNTIME_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_RUNTIME_AVAILABLE); msr->idx = HV_X64_MSR_VP_RUNTIME; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 7: /* Read only */ msr->idx = HV_X64_MSR_VP_RUNTIME; - msr->write = 1; + msr->write = true; msr->write_val = 1; - msr->available = 0; + msr->fault_expected = true; break; case 8: msr->idx = HV_X64_MSR_TIME_REF_COUNT; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 9: - feat->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_TIME_REF_COUNT_AVAILABLE); msr->idx = HV_X64_MSR_TIME_REF_COUNT; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 10: /* Read only */ msr->idx = HV_X64_MSR_TIME_REF_COUNT; - msr->write = 1; + msr->write = true; msr->write_val = 1; - msr->available = 0; + msr->fault_expected = true; break; case 11: msr->idx = HV_X64_MSR_VP_INDEX; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 12: - feat->eax |= HV_MSR_VP_INDEX_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_INDEX_AVAILABLE); msr->idx = HV_X64_MSR_VP_INDEX; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 13: /* Read only */ msr->idx = HV_X64_MSR_VP_INDEX; - msr->write = 1; + msr->write = true; msr->write_val = 1; - msr->available = 0; + msr->fault_expected = true; break; case 14: msr->idx = HV_X64_MSR_RESET; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 15: - feat->eax |= HV_MSR_RESET_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_RESET_AVAILABLE); msr->idx = HV_X64_MSR_RESET; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 16: msr->idx = HV_X64_MSR_RESET; - msr->write = 1; + msr->write = true; + /* + * TODO: the test only writes '0' to HV_X64_MSR_RESET + * at the moment, writing some other value there will + * trigger real vCPU reset and the code is not prepared + * to handle it yet. 
+ */ msr->write_val = 0; - msr->available = 1; + msr->fault_expected = false; break; case 17: msr->idx = HV_X64_MSR_REFERENCE_TSC; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 18: - feat->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_REFERENCE_TSC_AVAILABLE); msr->idx = HV_X64_MSR_REFERENCE_TSC; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 19: msr->idx = HV_X64_MSR_REFERENCE_TSC; - msr->write = 1; + msr->write = true; msr->write_val = 0; - msr->available = 1; + msr->fault_expected = false; break; case 20: msr->idx = HV_X64_MSR_EOM; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 21: /* @@ -268,149 +303,185 @@ static void guest_test_msrs_access(void) * capability enabled and guest visible CPUID bit unset. */ msr->idx = HV_X64_MSR_EOM; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 22: - feat->eax |= HV_MSR_SYNIC_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNIC_AVAILABLE); msr->idx = HV_X64_MSR_EOM; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 23: msr->idx = HV_X64_MSR_EOM; - msr->write = 1; + msr->write = true; msr->write_val = 0; - msr->available = 1; + msr->fault_expected = false; break; case 24: msr->idx = HV_X64_MSR_STIMER0_CONFIG; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 25: - feat->eax |= HV_MSR_SYNTIMER_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNTIMER_AVAILABLE); msr->idx = HV_X64_MSR_STIMER0_CONFIG; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 26: msr->idx = HV_X64_MSR_STIMER0_CONFIG; - msr->write = 1; + msr->write = true; msr->write_val = 0; - msr->available = 1; + msr->fault_expected = false; break; case 27: /* Direct mode test */ msr->idx = HV_X64_MSR_STIMER0_CONFIG; - msr->write = 1; + msr->write = true; msr->write_val = 1 << 12; - msr->available = 0; + msr->fault_expected = true; break; case 28: - feat->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_STIMER_DIRECT_MODE_AVAILABLE); msr->idx = HV_X64_MSR_STIMER0_CONFIG; - msr->write = 1; + msr->write = true; msr->write_val = 1 << 12; - msr->available = 1; + msr->fault_expected = false; break; case 29: msr->idx = HV_X64_MSR_EOI; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 30: - feat->eax |= HV_MSR_APIC_ACCESS_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_APIC_ACCESS_AVAILABLE); msr->idx = HV_X64_MSR_EOI; - msr->write = 1; + msr->write = true; msr->write_val = 1; - msr->available = 1; + msr->fault_expected = false; break; case 31: msr->idx = HV_X64_MSR_TSC_FREQUENCY; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 32: - feat->eax |= HV_ACCESS_FREQUENCY_MSRS; + vcpu_set_cpuid_feature(vcpu, HV_ACCESS_FREQUENCY_MSRS); msr->idx = HV_X64_MSR_TSC_FREQUENCY; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 33: /* Read only */ msr->idx = HV_X64_MSR_TSC_FREQUENCY; - msr->write = 1; + msr->write = true; msr->write_val = 1; - msr->available = 0; + msr->fault_expected = true; break; case 34: msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL; - msr->write = 0; - msr->available = 0; + msr->write = 
false; + msr->fault_expected = true; break; case 35: - feat->eax |= HV_ACCESS_REENLIGHTENMENT; + vcpu_set_cpuid_feature(vcpu, HV_ACCESS_REENLIGHTENMENT); msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 36: msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL; - msr->write = 1; + msr->write = true; msr->write_val = 1; - msr->available = 1; + msr->fault_expected = false; break; case 37: /* Can only write '0' */ msr->idx = HV_X64_MSR_TSC_EMULATION_STATUS; - msr->write = 1; + msr->write = true; msr->write_val = 1; - msr->available = 0; + msr->fault_expected = true; break; case 38: msr->idx = HV_X64_MSR_CRASH_P0; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 39: - feat->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE); msr->idx = HV_X64_MSR_CRASH_P0; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 40: msr->idx = HV_X64_MSR_CRASH_P0; - msr->write = 1; + msr->write = true; msr->write_val = 1; - msr->available = 1; + msr->fault_expected = false; break; case 41: msr->idx = HV_X64_MSR_SYNDBG_STATUS; - msr->write = 0; - msr->available = 0; + msr->write = false; + msr->fault_expected = true; break; case 42: - feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE; - dbg->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING; + vcpu_set_cpuid_feature(vcpu, HV_FEATURE_DEBUG_MSRS_AVAILABLE); + vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING); msr->idx = HV_X64_MSR_SYNDBG_STATUS; - msr->write = 0; - msr->available = 1; + msr->write = false; + msr->fault_expected = false; break; case 43: msr->idx = HV_X64_MSR_SYNDBG_STATUS; - msr->write = 1; + msr->write = true; msr->write_val = 0; - msr->available = 1; + msr->fault_expected = false; break; case 44: + /* MSR is not available when CPUID feature bit is unset */ + if (!has_invtsc) + continue; + msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL; + msr->write = false; + msr->fault_expected = true; + break; + case 45: + /* MSR is available when CPUID feature bit is set */ + if (!has_invtsc) + continue; + vcpu_set_cpuid_feature(vcpu, HV_ACCESS_TSC_INVARIANT); + msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL; + msr->write = false; + msr->fault_expected = false; + break; + case 46: + /* Writing bits other than 0 is forbidden */ + if (!has_invtsc) + continue; + msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL; + msr->write = true; + msr->write_val = 0xdeadbeef; + msr->fault_expected = true; + break; + case 47: + /* Setting bit 0 enables the feature */ + if (!has_invtsc) + continue; + msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL; + msr->write = true; + msr->write_val = 1; + msr->fault_expected = false; + break; + + default: kvm_vm_free(vm); return; } @@ -429,7 +500,7 @@ static void guest_test_msrs_access(void) switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: - REPORT_GUEST_ASSERT_2(uc, "MSR = %lx, vector = %lx"); + REPORT_GUEST_ASSERT_3(uc, "MSR = %lx, arg1 = %lx, arg2 = %lx"); return; case UCALL_DONE: break; @@ -445,7 +516,6 @@ static void guest_test_msrs_access(void) static void guest_test_hcalls_access(void) { - struct kvm_cpuid_entry2 *feat, *recomm, *dbg; struct kvm_cpuid2 *prev_cpuid = NULL; struct kvm_vcpu *vcpu; struct kvm_run *run; @@ -480,15 +550,11 @@ static void guest_test_hcalls_access(void) vcpu_init_cpuid(vcpu, prev_cpuid); } - feat = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES); -
recomm = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO); - dbg = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES); - run = vcpu->run; switch (stage) { case 0: - feat->eax |= HV_MSR_HYPERCALL_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE); hcall->control = 0xbeef; hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE; break; @@ -498,7 +564,7 @@ static void guest_test_hcalls_access(void) hcall->expect = HV_STATUS_ACCESS_DENIED; break; case 2: - feat->ebx |= HV_POST_MESSAGES; + vcpu_set_cpuid_feature(vcpu, HV_POST_MESSAGES); hcall->control = HVCALL_POST_MESSAGE; hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT; break; @@ -508,7 +574,7 @@ static void guest_test_hcalls_access(void) hcall->expect = HV_STATUS_ACCESS_DENIED; break; case 4: - feat->ebx |= HV_SIGNAL_EVENTS; + vcpu_set_cpuid_feature(vcpu, HV_SIGNAL_EVENTS); hcall->control = HVCALL_SIGNAL_EVENT; hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT; break; @@ -518,12 +584,12 @@ static void guest_test_hcalls_access(void) hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE; break; case 6: - dbg->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING; + vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING); hcall->control = HVCALL_RESET_DEBUG_SESSION; hcall->expect = HV_STATUS_ACCESS_DENIED; break; case 7: - feat->ebx |= HV_DEBUGGING; + vcpu_set_cpuid_feature(vcpu, HV_DEBUGGING); hcall->control = HVCALL_RESET_DEBUG_SESSION; hcall->expect = HV_STATUS_OPERATION_DENIED; break; @@ -533,7 +599,7 @@ static void guest_test_hcalls_access(void) hcall->expect = HV_STATUS_ACCESS_DENIED; break; case 9: - recomm->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; + vcpu_set_cpuid_feature(vcpu, HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED); hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE; hcall->expect = HV_STATUS_SUCCESS; break; @@ -542,7 +608,7 @@ static void guest_test_hcalls_access(void) hcall->expect = HV_STATUS_ACCESS_DENIED; break; case 11: - recomm->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED; + vcpu_set_cpuid_feature(vcpu, HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED); hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX; hcall->expect = HV_STATUS_SUCCESS; break; @@ -552,7 +618,7 @@ static void guest_test_hcalls_access(void) hcall->expect = HV_STATUS_ACCESS_DENIED; break; case 13: - recomm->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED; + vcpu_set_cpuid_feature(vcpu, HV_X64_CLUSTER_IPI_RECOMMENDED); hcall->control = HVCALL_SEND_IPI; hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT; break; @@ -567,7 +633,7 @@ static void guest_test_hcalls_access(void) hcall->expect = HV_STATUS_ACCESS_DENIED; break; case 16: - recomm->ebx = 0xfff; + vcpu_set_cpuid_feature(vcpu, HV_PV_SPINLOCKS_TEST); hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT; hcall->expect = HV_STATUS_SUCCESS; break; @@ -577,12 +643,21 @@ static void guest_test_hcalls_access(void) hcall->ud_expected = true; break; case 18: - feat->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE; + vcpu_set_cpuid_feature(vcpu, HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE); hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT; hcall->ud_expected = false; hcall->expect = HV_STATUS_SUCCESS; break; case 19: + hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES; + hcall->expect = HV_STATUS_ACCESS_DENIED; + break; + case 20: + vcpu_set_cpuid_feature(vcpu, HV_ENABLE_EXTENDED_HYPERCALLS); + hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES | HV_HYPERCALL_FAST_BIT; + hcall->expect = HV_STATUS_INVALID_PARAMETER; + break; + case 21: kvm_vm_free(vm); return; } diff --git 
a/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c index fb02581953a3..ce1ccc4c1503 100644 --- a/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c +++ b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c @@ -93,7 +93,7 @@ int main(void) { int warnings_before, warnings_after; - TEST_REQUIRE(is_intel_cpu()); + TEST_REQUIRE(host_cpu_is_intel); TEST_REQUIRE(!vm_is_unrestricted_guest(NULL)); diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c index 2de98fce7edd..bad7ef8c5b92 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c @@ -198,14 +198,15 @@ static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents) static struct kvm_pmu_event_filter * -create_pmu_event_filter(const uint64_t event_list[], - int nevents, uint32_t action) +create_pmu_event_filter(const uint64_t event_list[], int nevents, + uint32_t action, uint32_t flags) { struct kvm_pmu_event_filter *f; int i; f = alloc_pmu_event_filter(nevents); f->action = action; + f->flags = flags; for (i = 0; i < nevents; i++) f->events[i] = event_list[i]; @@ -216,7 +217,7 @@ static struct kvm_pmu_event_filter *event_filter(uint32_t action) { return create_pmu_event_filter(event_list, ARRAY_SIZE(event_list), - action); + action, 0); } /* @@ -263,7 +264,7 @@ static void test_amd_deny_list(struct kvm_vcpu *vcpu) struct kvm_pmu_event_filter *f; uint64_t count; - f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY); + f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0); count = test_with_filter(vcpu, f); free(f); @@ -363,7 +364,7 @@ static void test_pmu_config_disable(void (*guest_code)(void)) */ static bool use_intel_pmu(void) { - return is_intel_cpu() && + return host_cpu_is_intel && kvm_cpu_property(X86_PROPERTY_PMU_VERSION) && kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS) && kvm_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED); @@ -397,19 +398,378 @@ static bool use_amd_pmu(void) uint32_t family = kvm_cpu_family(); uint32_t model = kvm_cpu_model(); - return is_amd_cpu() && + return host_cpu_is_amd && (is_zen1(family, model) || is_zen2(family, model) || is_zen3(family, model)); } +/* + * "MEM_INST_RETIRED.ALL_LOADS", "MEM_INST_RETIRED.ALL_STORES", and + * "MEM_INST_RETIRED.ANY" from https://perfmon-events.intel.com/ + * supported on Intel Xeon processors: + * - Sapphire Rapids, Ice Lake, Cascade Lake, Skylake. + */ +#define MEM_INST_RETIRED 0xD0 +#define MEM_INST_RETIRED_LOAD EVENT(MEM_INST_RETIRED, 0x81) +#define MEM_INST_RETIRED_STORE EVENT(MEM_INST_RETIRED, 0x82) +#define MEM_INST_RETIRED_LOAD_STORE EVENT(MEM_INST_RETIRED, 0x83) + +static bool supports_event_mem_inst_retired(void) +{ + uint32_t eax, ebx, ecx, edx; + + cpuid(1, &eax, &ebx, &ecx, &edx); + if (x86_family(eax) == 0x6) { + switch (x86_model(eax)) { + /* Sapphire Rapids */ + case 0x8F: + /* Ice Lake */ + case 0x6A: + /* Skylake */ + /* Cascade Lake */ + case 0x55: + return true; + } + } + + return false; +} + +/* + * "LS Dispatch", from Processor Programming Reference + * (PPR) for AMD Family 17h Model 01h, Revision B1 Processors, + * Preliminary Processor Programming Reference (PPR) for AMD Family + * 17h Model 31h, Revision B0 Processors, and Preliminary Processor + * Programming Reference (PPR) for AMD Family 19h Model 01h, Revision + * B1 Processors Volume 1 of 2. 
+ */ +#define LS_DISPATCH 0x29 +#define LS_DISPATCH_LOAD EVENT(LS_DISPATCH, BIT(0)) +#define LS_DISPATCH_STORE EVENT(LS_DISPATCH, BIT(1)) +#define LS_DISPATCH_LOAD_STORE EVENT(LS_DISPATCH, BIT(2)) + +#define INCLUDE_MASKED_ENTRY(event_select, mask, match) \ + KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, false) +#define EXCLUDE_MASKED_ENTRY(event_select, mask, match) \ + KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, true) + +struct perf_counter { + union { + uint64_t raw; + struct { + uint64_t loads:22; + uint64_t stores:22; + uint64_t loads_stores:20; + }; + }; +}; + +static uint64_t masked_events_guest_test(uint32_t msr_base) +{ + uint64_t ld0, ld1, st0, st1, ls0, ls1; + struct perf_counter c; + int val; + + /* + * The actual values of the counters don't determine the outcome of + * the test, only whether they are zero or non-zero. + */ + ld0 = rdmsr(msr_base + 0); + st0 = rdmsr(msr_base + 1); + ls0 = rdmsr(msr_base + 2); + + __asm__ __volatile__("movl $0, %[v];" + "movl %[v], %%eax;" + "incl %[v];" + : [v]"+m"(val) :: "eax"); + + ld1 = rdmsr(msr_base + 0); + st1 = rdmsr(msr_base + 1); + ls1 = rdmsr(msr_base + 2); + + c.loads = ld1 - ld0; + c.stores = st1 - st0; + c.loads_stores = ls1 - ls0; + + return c.raw; +} + +static void intel_masked_events_guest_code(void) +{ + uint64_t r; + + for (;;) { + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); + + wrmsr(MSR_P6_EVNTSEL0 + 0, ARCH_PERFMON_EVENTSEL_ENABLE | + ARCH_PERFMON_EVENTSEL_OS | MEM_INST_RETIRED_LOAD); + wrmsr(MSR_P6_EVNTSEL0 + 1, ARCH_PERFMON_EVENTSEL_ENABLE | + ARCH_PERFMON_EVENTSEL_OS | MEM_INST_RETIRED_STORE); + wrmsr(MSR_P6_EVNTSEL0 + 2, ARCH_PERFMON_EVENTSEL_ENABLE | + ARCH_PERFMON_EVENTSEL_OS | MEM_INST_RETIRED_LOAD_STORE); + + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x7); + + r = masked_events_guest_test(MSR_IA32_PMC0); + + GUEST_SYNC(r); + } +} + +static void amd_masked_events_guest_code(void) +{ + uint64_t r; + + for (;;) { + wrmsr(MSR_K7_EVNTSEL0, 0); + wrmsr(MSR_K7_EVNTSEL1, 0); + wrmsr(MSR_K7_EVNTSEL2, 0); + + wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE | + ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_LOAD); + wrmsr(MSR_K7_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE | + ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_STORE); + wrmsr(MSR_K7_EVNTSEL2, ARCH_PERFMON_EVENTSEL_ENABLE | + ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_LOAD_STORE); + + r = masked_events_guest_test(MSR_K7_PERFCTR0); + + GUEST_SYNC(r); + } +} + +static struct perf_counter run_masked_events_test(struct kvm_vcpu *vcpu, + const uint64_t masked_events[], + const int nmasked_events) +{ + struct kvm_pmu_event_filter *f; + struct perf_counter r; + + f = create_pmu_event_filter(masked_events, nmasked_events, + KVM_PMU_EVENT_ALLOW, + KVM_PMU_EVENT_FLAG_MASKED_EVENTS); + r.raw = test_with_filter(vcpu, f); + free(f); + + return r; +} + +/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */ +#define MAX_FILTER_EVENTS 300 +#define MAX_TEST_EVENTS 10 + +#define ALLOW_LOADS BIT(0) +#define ALLOW_STORES BIT(1) +#define ALLOW_LOADS_STORES BIT(2) + +struct masked_events_test { + uint64_t intel_events[MAX_TEST_EVENTS]; + uint64_t intel_event_end; + uint64_t amd_events[MAX_TEST_EVENTS]; + uint64_t amd_event_end; + const char *msg; + uint32_t flags; +}; + +/* + * These are the test cases for the masked events tests. + * + * For each test, the guest enables 3 PMU counters (loads, stores, + * loads + stores). The filter is then set in KVM with the masked events + * provided.
The test then verifies that the counters agree with which + * ones should be counting and which ones should be filtered. + */ +const struct masked_events_test test_cases[] = { + { + .intel_events = { + INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x81), + }, + .amd_events = { + INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(0)), + }, + .msg = "Only allow loads.", + .flags = ALLOW_LOADS, + }, { + .intel_events = { + INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x82), + }, + .amd_events = { + INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(1)), + }, + .msg = "Only allow stores.", + .flags = ALLOW_STORES, + }, { + .intel_events = { + INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x83), + }, + .amd_events = { + INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(2)), + }, + .msg = "Only allow loads + stores.", + .flags = ALLOW_LOADS_STORES, + }, { + .intel_events = { + INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0), + EXCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x83), + }, + .amd_events = { + INCLUDE_MASKED_ENTRY(LS_DISPATCH, ~(BIT(0) | BIT(1)), 0), + }, + .msg = "Only allow loads and stores.", + .flags = ALLOW_LOADS | ALLOW_STORES, + }, { + .intel_events = { + INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0), + EXCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x82), + }, + .amd_events = { + INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xF8, 0), + EXCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(1)), + }, + .msg = "Only allow loads and loads + stores.", + .flags = ALLOW_LOADS | ALLOW_LOADS_STORES + }, { + .intel_events = { + INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFE, 0x82), + }, + .amd_events = { + INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xF8, 0), + EXCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(0)), + }, + .msg = "Only allow stores and loads + stores.", + .flags = ALLOW_STORES | ALLOW_LOADS_STORES + }, { + .intel_events = { + INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0), + }, + .amd_events = { + INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xF8, 0), + }, + .msg = "Only allow loads, stores, and loads + stores.", + .flags = ALLOW_LOADS | ALLOW_STORES | ALLOW_LOADS_STORES + }, +}; + +static int append_test_events(const struct masked_events_test *test, + uint64_t *events, int nevents) +{ + const uint64_t *evts; + int i; + + evts = use_intel_pmu() ? test->intel_events : test->amd_events; + for (i = 0; i < MAX_TEST_EVENTS; i++) { + if (evts[i] == 0) + break; + + events[nevents + i] = evts[i]; + } + + return nevents + i; +} + +static bool bool_eq(bool a, bool b) +{ + return a == b; +} + +static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events, + int nevents) +{ + int ntests = ARRAY_SIZE(test_cases); + struct perf_counter c; + int i, n; + + for (i = 0; i < ntests; i++) { + const struct masked_events_test *test = &test_cases[i]; + + /* Do any test case events overflow MAX_TEST_EVENTS? 
*/ + assert(test->intel_event_end == 0); + assert(test->amd_event_end == 0); + + n = append_test_events(test, events, nevents); + + c = run_masked_events_test(vcpu, events, n); + TEST_ASSERT(bool_eq(c.loads, test->flags & ALLOW_LOADS) && + bool_eq(c.stores, test->flags & ALLOW_STORES) && + bool_eq(c.loads_stores, + test->flags & ALLOW_LOADS_STORES), + "%s loads: %u, stores: %u, loads + stores: %u", + test->msg, c.loads, c.stores, c.loads_stores); + } +} + +static void add_dummy_events(uint64_t *events, int nevents) +{ + int i; + + for (i = 0; i < nevents; i++) { + int event_select = i % 0xFF; + bool exclude = ((i % 4) == 0); + + if (event_select == MEM_INST_RETIRED || + event_select == LS_DISPATCH) + event_select++; + + events[i] = KVM_PMU_ENCODE_MASKED_ENTRY(event_select, 0, + 0, exclude); + } +} + +static void test_masked_events(struct kvm_vcpu *vcpu) +{ + int nevents = MAX_FILTER_EVENTS - MAX_TEST_EVENTS; + uint64_t events[MAX_FILTER_EVENTS]; + + /* Run the test cases against a sparse PMU event filter. */ + run_masked_events_tests(vcpu, events, 0); + + /* Run the test cases against a dense PMU event filter. */ + add_dummy_events(events, MAX_FILTER_EVENTS); + run_masked_events_tests(vcpu, events, nevents); +} + +static int run_filter_test(struct kvm_vcpu *vcpu, const uint64_t *events, + int nevents, uint32_t flags) +{ + struct kvm_pmu_event_filter *f; + int r; + + f = create_pmu_event_filter(events, nevents, KVM_PMU_EVENT_ALLOW, flags); + r = __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f); + free(f); + + return r; +} + +static void test_filter_ioctl(struct kvm_vcpu *vcpu) +{ + uint64_t e = ~0ul; + int r; + + /* + * Unfortunately, a filter whose event data has invalid bits set + * (bits other than eventsel+umask) is expected to succeed when + * flags == 0. + */ + r = run_filter_test(vcpu, &e, 1, 0); + TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing"); + + r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS); + TEST_ASSERT(r != 0, "Invalid PMU Event Filter is expected to fail"); + + e = KVM_PMU_ENCODE_MASKED_ENTRY(0xff, 0xff, 0xff, 0xf); + r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS); + TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing"); +} + int main(int argc, char *argv[]) { void (*guest_code)(void); - struct kvm_vcpu *vcpu; + struct kvm_vcpu *vcpu, *vcpu2 = NULL; struct kvm_vm *vm; TEST_REQUIRE(kvm_has_cap(KVM_CAP_PMU_EVENT_FILTER)); + TEST_REQUIRE(kvm_has_cap(KVM_CAP_PMU_EVENT_MASKED_EVENTS)); TEST_REQUIRE(use_intel_pmu() || use_amd_pmu()); guest_code = use_intel_pmu() ?
intel_guest_code : amd_guest_code; @@ -430,6 +790,17 @@ int main(int argc, char *argv[]) test_not_member_deny_list(vcpu); test_not_member_allow_list(vcpu); + if (use_intel_pmu() && + supports_event_mem_inst_retired() && + kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS) >= 3) + vcpu2 = vm_vcpu_add(vm, 2, intel_masked_events_guest_code); + else if (use_amd_pmu()) + vcpu2 = vm_vcpu_add(vm, 2, amd_masked_events_guest_code); + + if (vcpu2) + test_masked_events(vcpu2); + test_filter_ioctl(vcpu); + kvm_vm_free(vm); test_pmu_config_disable(guest_code); diff --git a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c index 22d366c697f7..c9f67702f657 100644 --- a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c +++ b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c @@ -72,11 +72,16 @@ static void run_vcpu(struct kvm_vcpu *vcpu, int stage) switch (get_ucall(vcpu, &uc)) { case UCALL_SYNC: - TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") && - uc.args[1] == stage + 1, "Stage %d: Unexpected register values vmexit, got %lx", - stage + 1, (ulong)uc.args[1]); + if (!strcmp((const char *)uc.args[0], "hello") && + uc.args[1] == stage + 1) + ksft_test_result_pass("stage %d passed\n", stage + 1); + else + ksft_test_result_fail( + "stage %d: Unexpected register values vmexit, got %lx", + stage + 1, (ulong)uc.args[1]); return; case UCALL_DONE: + ksft_test_result_pass("stage %d passed\n", stage + 1); return; case UCALL_ABORT: REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx"); @@ -92,6 +97,9 @@ int main(void) struct kvm_vm *vm; uint64_t val; + ksft_print_header(); + ksft_set_plan(5); + vm = vm_create_with_one_vcpu(&vcpu, guest_code); val = 0; @@ -149,5 +157,5 @@ int main(void) kvm_vm_free(vm); - return 0; + ksft_finished(); /* Print results and exit() accordingly */ } diff --git a/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c b/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c index 2641b286b4ed..ccdfa5dc1a4d 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c @@ -111,7 +111,7 @@ int main(int argc, char *argv[]) struct kvm_vcpu *vcpu; struct kvm_vm *vm; - TEST_REQUIRE(is_intel_cpu()); + TEST_REQUIRE(host_cpu_is_intel); TEST_REQUIRE(!vm_is_unrestricted_guest(NULL)); vm = vm_create_with_one_vcpu(&vcpu, guest_code); diff --git a/tools/testing/selftests/kvm/x86_64/xapic_state_test.c b/tools/testing/selftests/kvm/x86_64/xapic_state_test.c index d7d37dae3eeb..396c13f42457 100644 --- a/tools/testing/selftests/kvm/x86_64/xapic_state_test.c +++ b/tools/testing/selftests/kvm/x86_64/xapic_state_test.c @@ -132,6 +132,59 @@ static void test_icr(struct xapic_vcpu *x) __test_icr(x, -1ull & ~APIC_DM_FIXED_MASK); } +static void __test_apic_id(struct kvm_vcpu *vcpu, uint64_t apic_base) +{ + uint32_t apic_id, expected; + struct kvm_lapic_state xapic; + + vcpu_set_msr(vcpu, MSR_IA32_APICBASE, apic_base); + + vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic); + + expected = apic_base & X2APIC_ENABLE ? vcpu->id : vcpu->id << 24; + apic_id = *((u32 *)&xapic.regs[APIC_ID]); + + TEST_ASSERT(apic_id == expected, + "APIC_ID not set back to %s format; wanted = %x, got = %x", + (apic_base & X2APIC_ENABLE) ? "x2APIC" : "xAPIC", + expected, apic_id); +} + +/* + * Verify that KVM switches the APIC_ID between xAPIC and x2APIC when userspace + * stuffs MSR_IA32_APICBASE. 
Setting the APIC_ID when x2APIC is enabled and + * when the APIC transitions for DISABLED to ENABLED is architectural behavior + * (on Intel), whereas the x2APIC => xAPIC transition behavior is KVM ABI since + * attempted to transition from x2APIC to xAPIC without disabling the APIC is + * architecturally disallowed. + */ +static void test_apic_id(void) +{ + const uint32_t NR_VCPUS = 3; + struct kvm_vcpu *vcpus[NR_VCPUS]; + uint64_t apic_base; + struct kvm_vm *vm; + int i; + + vm = vm_create_with_vcpus(NR_VCPUS, NULL, vcpus); + vm_enable_cap(vm, KVM_CAP_X2APIC_API, KVM_X2APIC_API_USE_32BIT_IDS); + + for (i = 0; i < NR_VCPUS; i++) { + apic_base = vcpu_get_msr(vcpus[i], MSR_IA32_APICBASE); + + TEST_ASSERT(apic_base & MSR_IA32_APICBASE_ENABLE, + "APIC not in ENABLED state at vCPU RESET"); + TEST_ASSERT(!(apic_base & X2APIC_ENABLE), + "APIC not in xAPIC mode at vCPU RESET"); + + __test_apic_id(vcpus[i], apic_base); + __test_apic_id(vcpus[i], apic_base | X2APIC_ENABLE); + __test_apic_id(vcpus[i], apic_base); + } + + kvm_vm_free(vm); +} + int main(int argc, char *argv[]) { struct xapic_vcpu x = { @@ -157,4 +210,6 @@ int main(int argc, char *argv[]) virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA); test_icr(&x); kvm_vm_free(vm); + + test_apic_id(); } diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c index 13c75dc18c10..5a3bf8f61417 100644 --- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c +++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c @@ -19,9 +19,6 @@ #include <sys/eventfd.h> -/* Defined in include/linux/kvm_types.h */ -#define GPA_INVALID (~(ulong)0) - #define SHINFO_REGION_GVA 0xc0000000ULL #define SHINFO_REGION_GPA 0xc0000000ULL #define SHINFO_REGION_SLOT 10 @@ -412,21 +409,21 @@ static void *juggle_shinfo_state(void *arg) { struct kvm_vm *vm = (struct kvm_vm *)arg; - struct kvm_xen_hvm_attr cache_init = { + struct kvm_xen_hvm_attr cache_activate = { .type = KVM_XEN_ATTR_TYPE_SHARED_INFO, .u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE }; - struct kvm_xen_hvm_attr cache_destroy = { + struct kvm_xen_hvm_attr cache_deactivate = { .type = KVM_XEN_ATTR_TYPE_SHARED_INFO, - .u.shared_info.gfn = GPA_INVALID + .u.shared_info.gfn = KVM_XEN_INVALID_GFN }; for (;;) { - __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_init); - __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_destroy); + __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate); + __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate); pthread_testcancel(); - }; + } return NULL; } diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 70ea8798b1f6..7da8ec838c63 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh @@ -9,7 +9,7 @@ ret=0 ksft_skip=4 # all tests in this script. 
Can be overridden with -t option -TESTS="unregister down carrier nexthop suppress ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw rp_filter ipv4_del_addr ipv4_mangle ipv6_mangle ipv4_bcast_neigh" +TESTS="unregister down carrier nexthop suppress ipv6_notify ipv4_notify ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw rp_filter ipv4_del_addr ipv4_mangle ipv6_mangle ipv4_bcast_neigh" VERBOSE=0 PAUSE_ON_FAIL=no @@ -655,6 +655,98 @@ fib_nexthop_test() cleanup } +fib6_notify_test() +{ + setup + + echo + echo "Fib6 info length calculation in route notify test" + set -e + + for i in 10 20 30 40 50 60 70; + do + $IP link add dummy_$i type dummy + $IP link set dev dummy_$i up + $IP -6 address add 2001:$i::1/64 dev dummy_$i + done + + $NS_EXEC ip monitor route &> errors.txt & + sleep 2 + + $IP -6 route add 2001::/64 \ + nexthop via 2001:10::2 dev dummy_10 \ + nexthop encap ip6 dst 2002::20 via 2001:20::2 dev dummy_20 \ + nexthop encap ip6 dst 2002::30 via 2001:30::2 dev dummy_30 \ + nexthop encap ip6 dst 2002::40 via 2001:40::2 dev dummy_40 \ + nexthop encap ip6 dst 2002::50 via 2001:50::2 dev dummy_50 \ + nexthop encap ip6 dst 2002::60 via 2001:60::2 dev dummy_60 \ + nexthop encap ip6 dst 2002::70 via 2001:70::2 dev dummy_70 + + set +e + + err=`cat errors.txt |grep "Message too long"` + if [ -z "$err" ];then + ret=0 + else + ret=1 + fi + + log_test $ret 0 "ipv6 route add notify" + + { kill %% && wait %%; } 2>/dev/null + + rm errors.txt + + cleanup &> /dev/null +} + + +fib_notify_test() +{ + setup + + echo + echo "Fib4 info length calculation in route notify test" + + set -e + + for i in 10 20 30 40 50 60 70; + do + $IP link add dummy_$i type dummy + $IP link set dev dummy_$i up + $IP address add 20.20.$i.2/24 dev dummy_$i + done + + $NS_EXEC ip monitor route &> errors.txt & + sleep 2 + + $IP route add 10.0.0.0/24 \ + nexthop via 20.20.10.1 dev dummy_10 \ + nexthop encap ip dst 192.168.10.20 via 20.20.20.1 dev dummy_20 \ + nexthop encap ip dst 192.168.10.30 via 20.20.30.1 dev dummy_30 \ + nexthop encap ip dst 192.168.10.40 via 20.20.40.1 dev dummy_40 \ + nexthop encap ip dst 192.168.10.50 via 20.20.50.1 dev dummy_50 \ + nexthop encap ip dst 192.168.10.60 via 20.20.60.1 dev dummy_60 \ + nexthop encap ip dst 192.168.10.70 via 20.20.70.1 dev dummy_70 + + set +e + + err=`cat errors.txt |grep "Message too long"` + if [ -z "$err" ];then + ret=0 + else + ret=1 + fi + + log_test $ret 0 "ipv4 route add notify" + + { kill %% && wait %%; } 2>/dev/null + + rm errors.txt + + cleanup &> /dev/null +} fib_suppress_test() { echo @@ -2111,6 +2203,8 @@ do fib_carrier_test|carrier) fib_carrier_test;; fib_rp_filter_test|rp_filter) fib_rp_filter_test;; fib_nexthop_test|nexthop) fib_nexthop_test;; + fib_notify_test|ipv4_notify) fib_notify_test;; + fib6_notify_test|ipv6_notify) fib6_notify_test;; fib_suppress_test|suppress) fib_suppress_test;; ipv6_route_test|ipv6_rt) ipv6_route_test;; ipv4_route_test|ipv4_rt) ipv4_route_test;; diff --git a/tools/testing/selftests/netfilter/rpath.sh b/tools/testing/selftests/netfilter/rpath.sh index f7311e66d219..5289c8447a41 100755 --- a/tools/testing/selftests/netfilter/rpath.sh +++ b/tools/testing/selftests/netfilter/rpath.sh @@ -62,10 +62,16 @@ ip -net "$ns1" a a fec0:42::2/64 dev v0 nodad ip -net "$ns2" a a fec0:42::1/64 dev d0 nodad # firewall matches to test -[ -n "$iptables" ] && ip netns exec "$ns2" \ - "$iptables" -t raw -A PREROUTING -s 192.168.0.0/16 -m rpfilter -[
-n "$ip6tables" ] && ip netns exec "$ns2" \ - "$ip6tables" -t raw -A PREROUTING -s fec0::/16 -m rpfilter +[ -n "$iptables" ] && { + common='-t raw -A PREROUTING -s 192.168.0.0/16' + ip netns exec "$ns2" "$iptables" $common -m rpfilter + ip netns exec "$ns2" "$iptables" $common -m rpfilter --invert +} +[ -n "$ip6tables" ] && { + common='-t raw -A PREROUTING -s fec0::/16' + ip netns exec "$ns2" "$ip6tables" $common -m rpfilter + ip netns exec "$ns2" "$ip6tables" $common -m rpfilter --invert +} [ -n "$nft" ] && ip netns exec "$ns2" $nft -f - <<EOF table inet t { chain c { @@ -89,6 +95,11 @@ ipt_zero_rule() { # (command) [ -n "$1" ] || return 0 ip netns exec "$ns2" "$1" -t raw -vS | grep -q -- "-m rpfilter -c 0 0" } +ipt_zero_reverse_rule() { # (command) + [ -n "$1" ] || return 0 + ip netns exec "$ns2" "$1" -t raw -vS | \ + grep -q -- "-m rpfilter --invert -c 0 0" +} nft_zero_rule() { # (family) [ -n "$nft" ] || return 0 ip netns exec "$ns2" "$nft" list chain inet t c | \ @@ -101,8 +112,7 @@ netns_ping() { # (netns, args...) ip netns exec "$netns" ping -q -c 1 -W 1 "$@" >/dev/null } -testrun() { - # clear counters first +clear_counters() { [ -n "$iptables" ] && ip netns exec "$ns2" "$iptables" -t raw -Z [ -n "$ip6tables" ] && ip netns exec "$ns2" "$ip6tables" -t raw -Z if [ -n "$nft" ]; then @@ -111,6 +121,10 @@ testrun() { ip netns exec "$ns2" $nft -s list table inet t; ) | ip netns exec "$ns2" $nft -f - fi +} + +testrun() { + clear_counters # test 1: martian traffic should fail rpfilter matches netns_ping "$ns1" -I v0 192.168.42.1 && \ @@ -120,9 +134,13 @@ testrun() { ipt_zero_rule "$iptables" || die "iptables matched martian" ipt_zero_rule "$ip6tables" || die "ip6tables matched martian" + ipt_zero_reverse_rule "$iptables" && die "iptables not matched martian" + ipt_zero_reverse_rule "$ip6tables" && die "ip6tables not matched martian" nft_zero_rule ip || die "nft IPv4 matched martian" nft_zero_rule ip6 || die "nft IPv6 matched martian" + clear_counters + # test 2: rpfilter match should pass for regular traffic netns_ping "$ns1" 192.168.23.1 || \ die "regular ping 192.168.23.1 failed" @@ -131,6 +149,8 @@ testrun() { ipt_zero_rule "$iptables" && die "iptables match not effective" ipt_zero_rule "$ip6tables" && die "ip6tables match not effective" + ipt_zero_reverse_rule "$iptables" || die "iptables match over-effective" + ipt_zero_reverse_rule "$ip6tables" || die "ip6tables match over-effective" nft_zero_rule ip && die "nft IPv4 match not effective" nft_zero_rule ip6 && die "nft IPv6 match not effective" diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 61386e499b77..43ec36b179dc 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -128,6 +128,8 @@ struct seccomp_data { # define __NR_seccomp 277 # elif defined(__csky__) # define __NR_seccomp 277 +# elif defined(__loongarch__) +# define __NR_seccomp 277 # elif defined(__hppa__) # define __NR_seccomp 338 # elif defined(__powerpc__) @@ -1755,6 +1757,10 @@ TEST_F(TRACE_poke, getpid_runs_normally) NT_ARM_SYSTEM_CALL, &__v)); \ } while (0) # define SYSCALL_RET(_regs) (_regs).regs[0] +#elif defined(__loongarch__) +# define ARCH_REGS struct user_pt_regs +# define SYSCALL_NUM(_regs) (_regs).regs[11] +# define SYSCALL_RET(_regs) (_regs).regs[4] #elif defined(__riscv) && __riscv_xlen == 64 # define ARCH_REGS struct user_regs_struct # define SYSCALL_NUM(_regs) (_regs).a7 diff --git a/tools/virtio/Makefile b/tools/virtio/Makefile index 
1b25cc7c64bb..7b7139d97d74 100644 --- a/tools/virtio/Makefile +++ b/tools/virtio/Makefile @@ -4,7 +4,7 @@ test: virtio_test vringh_test virtio_test: virtio_ring.o virtio_test.o vringh_test: vringh_test.o vringh.o virtio_ring.o -CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h +CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h -mfunction-return=thunk -fcf-protection=none -mindirect-branch-register CFLAGS += -pthread LDFLAGS += -pthread vpath %.c ../../drivers/virtio ../../drivers/vhost
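
Editor's note on the masked-event filtering exercised by the new pmu_event_filter_test cases above: KVM_PMU_ENCODE_MASKED_ENTRY() packs an event select together with a (mask, match, exclude) triple, and a guest unit mask "hits" an entry when (unit_mask & mask) == match; with KVM_PMU_EVENT_ALLOW, an event is counted only if some include entry hits and no exclude entry hits. The sketch below is a minimal user-space model of that matching rule, written to make the test_cases[] table easier to follow. It is illustrative only, not KVM's implementation; struct masked_entry, entry_hits() and is_allowed() are hypothetical names.

/*
 * Stand-alone model of masked PMU event filter matching (illustrative).
 * Build: cc -o masked-model masked-model.c
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct masked_entry {
	uint16_t event_select;	/* e.g. MEM_INST_RETIRED (0xD0), LS_DISPATCH (0x29) */
	uint8_t mask;		/* which unit-mask bits are compared */
	uint8_t match;		/* required value of the masked bits */
	bool exclude;		/* true for EXCLUDE_MASKED_ENTRY() */
};

/* An entry "hits" when the event select matches and the masked
 * unit-mask bits equal the match value. */
static bool entry_hits(const struct masked_entry *e,
		       uint16_t event_select, uint8_t unit_mask)
{
	return e->event_select == event_select &&
	       (unit_mask & e->mask) == e->match;
}

/* KVM_PMU_EVENT_ALLOW semantics: allowed iff at least one include
 * entry hits and no exclude entry hits (exclude wins). */
static bool is_allowed(const struct masked_entry *f, int nentries,
		       uint16_t event_select, uint8_t unit_mask)
{
	bool included = false;
	int i;

	for (i = 0; i < nentries; i++) {
		if (!entry_hits(&f[i], event_select, unit_mask))
			continue;
		if (f[i].exclude)
			return false;
		included = true;
	}
	return included;
}

int main(void)
{
	/* The "Only allow loads and stores." Intel case from test_cases[]:
	 * include any unit mask whose bits 2-6 are clear (0x81, 0x82 and
	 * 0x83 all qualify), then exclude loads+stores (0x83) exactly. */
	const struct masked_entry filter[] = {
		{ 0xD0, 0x7C, 0x00, false },	/* INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0) */
		{ 0xD0, 0xFF, 0x83, true },	/* EXCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x83) */
	};

	printf("loads        (0x81): %d\n", is_allowed(filter, 2, 0xD0, 0x81));	/* 1 */
	printf("stores       (0x82): %d\n", is_allowed(filter, 2, 0xD0, 0x82));	/* 1 */
	printf("loads+stores (0x83): %d\n", is_allowed(filter, 2, 0xD0, 0x83));	/* 0 */
	return 0;
}

Run against the table above, this model reproduces the expected ALLOW_LOADS | ALLOW_STORES outcome: the loads (0x81) and stores (0x82) counters keep counting while the combined loads+stores event (0x83) is filtered out.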