Diffstat (limited to 'tools')
53 files changed, 2000 insertions, 253 deletions
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index ea69ce35e902..c3bd294a63d1 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -3746,7 +3746,7 @@ static const struct flag flags[] = { { "NET_TX_SOFTIRQ", 2 }, { "NET_RX_SOFTIRQ", 3 }, { "BLOCK_SOFTIRQ", 4 }, - { "BLOCK_IOPOLL_SOFTIRQ", 5 }, + { "IRQ_POLL_SOFTIRQ", 5 }, { "TASKLET_SOFTIRQ", 6 }, { "SCHED_SOFTIRQ", 7 }, { "HRTIMER_SOFTIRQ", 8 }, diff --git a/tools/net/bpf_dbg.c b/tools/net/bpf_dbg.c index 9a287bec695a..4f254bcc4423 100644 --- a/tools/net/bpf_dbg.c +++ b/tools/net/bpf_dbg.c @@ -129,16 +129,16 @@ struct bpf_regs { }; static struct sock_filter bpf_image[BPF_MAXINSNS + 1]; -static unsigned int bpf_prog_len = 0; +static unsigned int bpf_prog_len; static int bpf_breakpoints[64]; static struct bpf_regs bpf_regs[BPF_MAXINSNS + 1]; static struct bpf_regs bpf_curr; -static unsigned int bpf_regs_len = 0; +static unsigned int bpf_regs_len; static int pcap_fd = -1; -static unsigned int pcap_packet = 0; -static size_t pcap_map_size = 0; +static unsigned int pcap_packet; +static size_t pcap_map_size; static char *pcap_ptr_va_start, *pcap_ptr_va_curr; static const char * const op_table[] = { @@ -1172,7 +1172,7 @@ static int cmd_breakpoint(char *subcmd) static int cmd_run(char *num) { - static uint32_t pass = 0, fail = 0; + static uint32_t pass, fail; bool has_limit = true; int pkts = 0, i = 0; diff --git a/tools/net/bpf_exp.l b/tools/net/bpf_exp.l index 7cc72a336645..bd83149e7be0 100644 --- a/tools/net/bpf_exp.l +++ b/tools/net/bpf_exp.l @@ -23,6 +23,9 @@ #include <stdio.h> #include <stdint.h> #include <stdlib.h> +#include <string.h> + +#include <linux/filter.h> #include "bpf_exp.yacc.h" @@ -79,22 +82,71 @@ extern void yyerror(const char *str); "txa" { return OP_TXA; } "#"?("len") { return K_PKT_LEN; } -"#"?("proto") { return K_PROTO; } -"#"?("type") { return K_TYPE; } -"#"?("poff") { return K_POFF; } -"#"?("ifidx") { return K_IFIDX; } -"#"?("nla") { return K_NLATTR; } -"#"?("nlan") { return K_NLATTR_NEST; } -"#"?("mark") { return K_MARK; } -"#"?("queue") { return K_QUEUE; } -"#"?("hatype") { return K_HATYPE; } -"#"?("rxhash") { return K_RXHASH; } -"#"?("cpu") { return K_CPU; } -"#"?("vlan_tci") { return K_VLAN_TCI; } -"#"?("vlan_pr") { return K_VLAN_AVAIL; } -"#"?("vlan_avail") { return K_VLAN_AVAIL; } -"#"?("vlan_tpid") { return K_VLAN_TPID; } -"#"?("rand") { return K_RAND; } + +"#"?("proto") { + yylval.number = SKF_AD_PROTOCOL; + return extension; + } +"#"?("type") { + yylval.number = SKF_AD_PKTTYPE; + return extension; + } +"#"?("poff") { + yylval.number = SKF_AD_PAY_OFFSET; + return extension; + } +"#"?("ifidx") { + yylval.number = SKF_AD_IFINDEX; + return extension; + } +"#"?("nla") { + yylval.number = SKF_AD_NLATTR; + return extension; + } +"#"?("nlan") { + yylval.number = SKF_AD_NLATTR_NEST; + return extension; + } +"#"?("mark") { + yylval.number = SKF_AD_MARK; + return extension; + } +"#"?("queue") { + yylval.number = SKF_AD_QUEUE; + return extension; + } +"#"?("hatype") { + yylval.number = SKF_AD_HATYPE; + return extension; + } +"#"?("rxhash") { + yylval.number = SKF_AD_RXHASH; + return extension; + } +"#"?("cpu") { + yylval.number = SKF_AD_CPU; + return extension; + } +"#"?("vlan_tci") { + yylval.number = SKF_AD_VLAN_TAG; + return extension; + } +"#"?("vlan_pr") { + yylval.number = SKF_AD_VLAN_TAG_PRESENT; + return extension; + } +"#"?("vlan_avail") { + yylval.number = SKF_AD_VLAN_TAG_PRESENT; + return extension; + } +"#"?("vlan_tpid") { + 
yylval.number = SKF_AD_VLAN_TPID; + return extension; + } +"#"?("rand") { + yylval.number = SKF_AD_RANDOM; + return extension; + } ":" { return ':'; } "," { return ','; } diff --git a/tools/net/bpf_exp.y b/tools/net/bpf_exp.y index e24eea1b0db5..56ba1de50784 100644 --- a/tools/net/bpf_exp.y +++ b/tools/net/bpf_exp.y @@ -35,6 +35,7 @@ enum jmp_type { JTL, JFL, JKL }; extern FILE *yyin; +extern int yylineno; extern int yylex(void); extern void yyerror(const char *str); @@ -55,14 +56,14 @@ static void bpf_set_jmp_label(char *label, enum jmp_type type); %token OP_RET OP_TAX OP_TXA OP_LDXB OP_MOD OP_NEG OP_JNEQ OP_JLT OP_JLE OP_LDI %token OP_LDXI -%token K_PKT_LEN K_PROTO K_TYPE K_NLATTR K_NLATTR_NEST K_MARK K_QUEUE K_HATYPE -%token K_RXHASH K_CPU K_IFIDX K_VLAN_TCI K_VLAN_AVAIL K_VLAN_TPID K_POFF K_RAND +%token K_PKT_LEN %token ':' ',' '[' ']' '(' ')' 'x' 'a' '+' 'M' '*' '&' '#' '%' -%token number label +%token extension number label %type <label> label +%type <number> extension %type <number> number %% @@ -125,51 +126,9 @@ ldb bpf_set_curr_instr(BPF_LD | BPF_B | BPF_IND, 0, 0, $6); } | OP_LDB '[' number ']' { bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, $3); } - | OP_LDB K_PROTO { + | OP_LDB extension { bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_PROTOCOL); } - | OP_LDB K_TYPE { - bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_PKTTYPE); } - | OP_LDB K_IFIDX { - bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_IFINDEX); } - | OP_LDB K_NLATTR { - bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_NLATTR); } - | OP_LDB K_NLATTR_NEST { - bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_NLATTR_NEST); } - | OP_LDB K_MARK { - bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_MARK); } - | OP_LDB K_QUEUE { - bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_QUEUE); } - | OP_LDB K_HATYPE { - bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_HATYPE); } - | OP_LDB K_RXHASH { - bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_RXHASH); } - | OP_LDB K_CPU { - bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_CPU); } - | OP_LDB K_VLAN_TCI { - bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_VLAN_TAG); } - | OP_LDB K_VLAN_AVAIL { - bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT); } - | OP_LDB K_POFF { - bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_PAY_OFFSET); } - | OP_LDB K_RAND { - bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_RANDOM); } - | OP_LDB K_VLAN_TPID { - bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_VLAN_TPID); } + SKF_AD_OFF + $2); } ; ldh @@ -179,51 +138,9 @@ ldh bpf_set_curr_instr(BPF_LD | BPF_H | BPF_IND, 0, 0, $6); } | OP_LDH '[' number ']' { bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, $3); } - | OP_LDH K_PROTO { - bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_PROTOCOL); } - | OP_LDH K_TYPE { - bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_PKTTYPE); } - | OP_LDH K_IFIDX { - bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_IFINDEX); } - | OP_LDH K_NLATTR { - bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_NLATTR); } - | OP_LDH K_NLATTR_NEST { - bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, - 
SKF_AD_OFF + SKF_AD_NLATTR_NEST); } - | OP_LDH K_MARK { - bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_MARK); } - | OP_LDH K_QUEUE { - bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_QUEUE); } - | OP_LDH K_HATYPE { + | OP_LDH extension { bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_HATYPE); } - | OP_LDH K_RXHASH { - bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_RXHASH); } - | OP_LDH K_CPU { - bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_CPU); } - | OP_LDH K_VLAN_TCI { - bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_VLAN_TAG); } - | OP_LDH K_VLAN_AVAIL { - bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT); } - | OP_LDH K_POFF { - bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_PAY_OFFSET); } - | OP_LDH K_RAND { - bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_RANDOM); } - | OP_LDH K_VLAN_TPID { - bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_VLAN_TPID); } + SKF_AD_OFF + $2); } ; ldi @@ -238,51 +155,9 @@ ld bpf_set_curr_instr(BPF_LD | BPF_IMM, 0, 0, $3); } | OP_LD K_PKT_LEN { bpf_set_curr_instr(BPF_LD | BPF_W | BPF_LEN, 0, 0, 0); } - | OP_LD K_PROTO { - bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_PROTOCOL); } - | OP_LD K_TYPE { - bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_PKTTYPE); } - | OP_LD K_IFIDX { - bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_IFINDEX); } - | OP_LD K_NLATTR { - bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_NLATTR); } - | OP_LD K_NLATTR_NEST { - bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_NLATTR_NEST); } - | OP_LD K_MARK { - bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_MARK); } - | OP_LD K_QUEUE { - bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_QUEUE); } - | OP_LD K_HATYPE { - bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_HATYPE); } - | OP_LD K_RXHASH { - bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_RXHASH); } - | OP_LD K_CPU { - bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_CPU); } - | OP_LD K_VLAN_TCI { - bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_VLAN_TAG); } - | OP_LD K_VLAN_AVAIL { - bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT); } - | OP_LD K_POFF { - bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_PAY_OFFSET); } - | OP_LD K_RAND { - bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_RANDOM); } - | OP_LD K_VLAN_TPID { + | OP_LD extension { bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, - SKF_AD_OFF + SKF_AD_VLAN_TPID); } + SKF_AD_OFF + $2); } | OP_LD 'M' '[' number ']' { bpf_set_curr_instr(BPF_LD | BPF_MEM, 0, 0, $4); } | OP_LD '[' 'x' '+' number ']' { @@ -776,5 +651,6 @@ void bpf_asm_compile(FILE *fp, bool cstyle) void yyerror(const char *str) { + fprintf(stderr, "error: %s at line %d\n", str, yylineno); exit(1); } diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 0a22407e1d7d..5d34815c7ccb 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -77,6 +77,9 @@ include config/utilities.mak # Define NO_AUXTRACE if you do not want AUX area tracing support 
# # Define NO_LIBBPF if you do not want BPF support +# +# Define FEATURES_DUMP to provide features detection dump file +# and bypass the feature detection # As per kernel Makefile, avoid funny character set dependencies unexport LC_ALL @@ -166,6 +169,15 @@ ifeq ($(config),1) include config/Makefile endif +# The FEATURE_DUMP_EXPORT holds location of the actual +# FEATURE_DUMP file to be used to bypass feature detection +# (for bpf or any other subproject) +ifeq ($(FEATURES_DUMP),) +FEATURE_DUMP_EXPORT := $(realpath $(OUTPUT)FEATURE-DUMP) +else +FEATURE_DUMP_EXPORT := $(FEATURES_DUMP) +endif + export prefix bindir sharedir sysconfdir DESTDIR # sparse is architecture-neutral, which means that we need to tell it @@ -436,7 +448,7 @@ $(LIBAPI)-clean: $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null $(LIBBPF): fixdep FORCE - $(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(realpath $(OUTPUT)FEATURE-DUMP) + $(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT) $(LIBBPF)-clean: $(call QUIET_CLEAN, libbpf) @@ -611,6 +623,17 @@ clean: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(python-clean) # +# To provide FEATURE-DUMP into $(FEATURE_DUMP_COPY) +# file if defined, with no further action. +feature-dump: +ifdef FEATURE_DUMP_COPY + @cp $(OUTPUT)FEATURE-DUMP $(FEATURE_DUMP_COPY) + @echo "FEATURE-DUMP file copied into $(FEATURE_DUMP_COPY)" +else + @echo "FEATURE-DUMP file available in $(OUTPUT)FEATURE-DUMP" +endif + +# # Trick: if ../../.git does not exist - we are building out of tree for example, # then force version regeneration: # diff --git a/tools/perf/arch/x86/tests/intel-cqm.c b/tools/perf/arch/x86/tests/intel-cqm.c index 3e89ba825f6b..7f064eb37158 100644 --- a/tools/perf/arch/x86/tests/intel-cqm.c +++ b/tools/perf/arch/x86/tests/intel-cqm.c @@ -17,7 +17,7 @@ static pid_t spawn(void) if (pid) return pid; - while(1); + while(1) sleep(5); return 0; } diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile index e5959c136a19..511141b102e8 100644 --- a/tools/perf/config/Makefile +++ b/tools/perf/config/Makefile @@ -181,7 +181,11 @@ LDFLAGS += -Wl,-z,noexecstack EXTLIBS = -lpthread -lrt -lm -ldl +ifeq ($(FEATURES_DUMP),) include $(srctree)/tools/build/Makefile.feature +else +include $(FEATURES_DUMP) +endif ifeq ($(feature-stackprotector-all), 1) CFLAGS += -fstack-protector-all diff --git a/tools/perf/tests/make b/tools/perf/tests/make index df38decc48c3..f918015512af 100644 --- a/tools/perf/tests/make +++ b/tools/perf/tests/make @@ -5,7 +5,7 @@ ifeq ($(MAKECMDGOALS),) # no target specified, trigger the whole suite all: @echo "Testing Makefile"; $(MAKE) -sf tests/make MK=Makefile - @echo "Testing Makefile.perf"; $(MAKE) -sf tests/make MK=Makefile.perf + @echo "Testing Makefile.perf"; $(MAKE) -sf tests/make MK=Makefile.perf SET_PARALLEL=1 SET_O=1 else # run only specific test over 'Makefile' %: @@ -13,6 +13,26 @@ else endif else PERF := . 
+PERF_O := $(PERF) +O_OPT := + +ifneq ($(O),) + FULL_O := $(shell readlink -f $(O) || echo $(O)) + PERF_O := $(FULL_O) + ifeq ($(SET_O),1) + O_OPT := 'O=$(FULL_O)' + endif + K_O_OPT := 'O=$(FULL_O)' +endif + +PARALLEL_OPT= +ifeq ($(SET_PARALLEL),1) + cores := $(shell (getconf _NPROCESSORS_ONLN || egrep -c '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null) + ifeq ($(cores),0) + cores := 1 + endif + PARALLEL_OPT="-j$(cores)" +endif # As per kernel Makefile, avoid funny character set dependencies unexport LC_ALL @@ -156,11 +176,11 @@ test_make_doc := $(test_ok) test_make_help_O := $(test_ok) test_make_doc_O := $(test_ok) -test_make_python_perf_so := test -f $(PERF)/python/perf.so +test_make_python_perf_so := test -f $(PERF_O)/python/perf.so -test_make_perf_o := test -f $(PERF)/perf.o -test_make_util_map_o := test -f $(PERF)/util/map.o -test_make_util_pmu_bison_o := test -f $(PERF)/util/pmu-bison.o +test_make_perf_o := test -f $(PERF_O)/perf.o +test_make_util_map_o := test -f $(PERF_O)/util/map.o +test_make_util_pmu_bison_o := test -f $(PERF_O)/util/pmu-bison.o define test_dest_files for file in $(1); do \ @@ -227,7 +247,7 @@ test_make_perf_o_O := test -f $$TMP_O/perf.o test_make_util_map_o_O := test -f $$TMP_O/util/map.o test_make_util_pmu_bison_o_O := test -f $$TMP_O/util/pmu-bison.o -test_default = test -x $(PERF)/perf +test_default = test -x $(PERF_O)/perf test = $(if $(test_$1),$(test_$1),$(test_default)) test_default_O = test -x $$TMP_O/perf @@ -247,12 +267,12 @@ endif MAKEFLAGS := --no-print-directory -clean := @(cd $(PERF); make -s -f $(MK) clean >/dev/null) +clean := @(cd $(PERF); make -s -f $(MK) $(O_OPT) clean >/dev/null) $(run): $(call clean) @TMP_DEST=$$(mktemp -d); \ - cmd="cd $(PERF) && make -f $(MK) DESTDIR=$$TMP_DEST $($@)"; \ + cmd="cd $(PERF) && make -f $(MK) $(PARALLEL_OPT) $(O_OPT) DESTDIR=$$TMP_DEST $($@)"; \ echo "- $@: $$cmd" && echo $$cmd > $@ && \ ( eval $$cmd ) >> $@ 2>&1; \ echo " test: $(call test,$@)" >> $@ 2>&1; \ @@ -263,7 +283,7 @@ $(run_O): $(call clean) @TMP_O=$$(mktemp -d); \ TMP_DEST=$$(mktemp -d); \ - cmd="cd $(PERF) && make -f $(MK) O=$$TMP_O DESTDIR=$$TMP_DEST $($(patsubst %_O,%,$@))"; \ + cmd="cd $(PERF) && make -f $(MK) $(PARALLEL_OPT) O=$$TMP_O DESTDIR=$$TMP_DEST $($(patsubst %_O,%,$@))"; \ echo "- $@: $$cmd" && echo $$cmd > $@ && \ ( eval $$cmd ) >> $@ 2>&1 && \ echo " test: $(call test_O,$@)" >> $@ 2>&1; \ @@ -276,17 +296,22 @@ tarpkg: ( eval $$cmd ) >> $@ 2>&1 && \ rm -f $@ +KERNEL_O := ../.. +ifneq ($(O),) + KERNEL_O := $(O) +endif + make_kernelsrc: - @echo "- make -C <kernelsrc> tools/perf" + @echo "- make -C <kernelsrc> $(PARALLEL_OPT) $(K_O_OPT) tools/perf" $(call clean); \ - (make -C ../.. tools/perf) > $@ 2>&1 && \ - test -x perf && rm -f $@ || (cat $@ ; false) + (make -C ../.. 
$(PARALLEL_OPT) $(K_O_OPT) tools/perf) > $@ 2>&1 && \ + test -x $(KERNEL_O)/tools/perf/perf && rm -f $@ || (cat $@ ; false) make_kernelsrc_tools: - @echo "- make -C <kernelsrc>/tools perf" + @echo "- make -C <kernelsrc>/tools $(PARALLEL_OPT) $(K_O_OPT) perf" $(call clean); \ - (make -C ../../tools perf) > $@ 2>&1 && \ - test -x perf && rm -f $@ || (cat $@ ; false) + (make -C ../../tools $(PARALLEL_OPT) $(K_O_OPT) perf) > $@ 2>&1 && \ + test -x $(KERNEL_O)/tools/perf/perf && rm -f $@ || (cat $@ ; false) all: $(run) $(run_O) tarpkg make_kernelsrc make_kernelsrc_tools @echo OK diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index d4d7cc27252f..718bd46d47fa 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -755,11 +755,11 @@ static int annotate_browser__run(struct annotate_browser *browser, nd = browser->curr_hot; break; case K_UNTAB: - if (nd != NULL) + if (nd != NULL) { nd = rb_next(nd); if (nd == NULL) nd = rb_first(&browser->entries); - else + } else nd = browser->curr_hot; break; case K_F1: diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index c226303e3da0..68a7612019dc 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -131,6 +131,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h) symlen = unresolved_col_width + 4 + 2; hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen); + hists__new_col_len(hists, HISTC_MEM_DCACHELINE, + symlen); } if (h->mem_info->iaddr.sym) { diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index 81a2eb77ba7f..05d815851be1 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -2068,6 +2068,15 @@ int intel_pt_process_auxtrace_info(union perf_event *event, err = -ENOMEM; goto err_free_queues; } + + /* + * Since this thread will not be kept in any rbtree nor in a + * list, initialize its list node so that at thread__put() the + * current thread lifetime assumption is kept and we don't segfault + * at list_del_init(). + */ + INIT_LIST_HEAD(&pt->unknown_thread->node); + err = thread__set_comm(pt->unknown_thread, "unknown", 0); if (err) goto err_delete_thread; diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 4f7b0efdde2f..813d9b272c81 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -399,6 +399,9 @@ static void tracepoint_error(struct parse_events_error *e, int err, { char help[BUFSIZ]; + if (!e) + return; + /* * We get error directly from syscall errno ( > 0), * or from encoded pointer's error ( < 0).
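For reference, the bpf_exp.l/bpf_exp.y refactor above folds the per-keyword K_* tokens into a single `extension` token that carries the SKF_AD_* constant in yylval.number, so one grammar rule per load width now handles every ancillary-data extension. An assembler line such as "ld #cpu" therefore still encodes to the same classic BPF instruction as before the refactor. A minimal sketch of that encoding, assuming only <linux/filter.h> (the array name is illustrative; the same two-instruction filter appears verbatim in the reuseport_bpf_cpu.c test added further down in this commit):

#include <linux/filter.h>

/* Classic BPF equivalent of "ld #cpu; ret a" */
static struct sock_filter ld_cpu_sketch[] = {
	/* A = raw_smp_processor_id() */
	{ BPF_LD | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_CPU },
	/* return A */
	{ BPF_RET | BPF_A, 0, 0, 0 },
};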
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 2be10fb27172..4ce5c5e18f48 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -686,8 +686,9 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf) pf->fb_ops = NULL; #if _ELFUTILS_PREREQ(0, 142) } else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa && - pf->cfi != NULL) { - if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 || + (pf->cfi_eh != NULL || pf->cfi_dbg != NULL)) { + if ((dwarf_cfi_addrframe(pf->cfi_eh, pf->addr, &frame) != 0 && + (dwarf_cfi_addrframe(pf->cfi_dbg, pf->addr, &frame) != 0)) || dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) { pr_warning("Failed to get call frame on 0x%jx\n", (uintmax_t)pf->addr); @@ -1015,8 +1016,7 @@ static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data) return DWARF_CB_OK; } -/* Find probe points from debuginfo */ -static int debuginfo__find_probes(struct debuginfo *dbg, +static int debuginfo__find_probe_location(struct debuginfo *dbg, struct probe_finder *pf) { struct perf_probe_point *pp = &pf->pev->point; @@ -1025,27 +1025,6 @@ static int debuginfo__find_probes(struct debuginfo *dbg, Dwarf_Die *diep; int ret = 0; -#if _ELFUTILS_PREREQ(0, 142) - Elf *elf; - GElf_Ehdr ehdr; - GElf_Shdr shdr; - - /* Get the call frame information from this dwarf */ - elf = dwarf_getelf(dbg->dbg); - if (elf == NULL) - return -EINVAL; - - if (gelf_getehdr(elf, &ehdr) == NULL) - return -EINVAL; - - if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) && - shdr.sh_type == SHT_PROGBITS) { - pf->cfi = dwarf_getcfi_elf(elf); - } else { - pf->cfi = dwarf_getcfi(dbg->dbg); - } -#endif - off = 0; pf->lcache = intlist__new(NULL); if (!pf->lcache) @@ -1108,6 +1087,39 @@ found: return ret; } +/* Find probe points from debuginfo */ +static int debuginfo__find_probes(struct debuginfo *dbg, + struct probe_finder *pf) +{ + int ret = 0; + +#if _ELFUTILS_PREREQ(0, 142) + Elf *elf; + GElf_Ehdr ehdr; + GElf_Shdr shdr; + + if (pf->cfi_eh || pf->cfi_dbg) + return debuginfo__find_probe_location(dbg, pf); + + /* Get the call frame information from this dwarf */ + elf = dwarf_getelf(dbg->dbg); + if (elf == NULL) + return -EINVAL; + + if (gelf_getehdr(elf, &ehdr) == NULL) + return -EINVAL; + + if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) && + shdr.sh_type == SHT_PROGBITS) + pf->cfi_eh = dwarf_getcfi_elf(elf); + + pf->cfi_dbg = dwarf_getcfi(dbg->dbg); +#endif + + ret = debuginfo__find_probe_location(dbg, pf); + return ret; +} + struct local_vars_finder { struct probe_finder *pf; struct perf_probe_arg *args; diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index bed82716e1b4..0aec7704e395 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -76,7 +76,10 @@ struct probe_finder { /* For variable searching */ #if _ELFUTILS_PREREQ(0, 142) - Dwarf_CFI *cfi; /* Call Frame Information */ + /* Call Frame Information from .eh_frame */ + Dwarf_CFI *cfi_eh; + /* Call Frame Information from .debug_frame */ + Dwarf_CFI *cfi_dbg; #endif Dwarf_Op *fb_ops; /* Frame base attribute */ struct perf_probe_arg *pvar; /* Current target variable */ diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index d5636ba94b20..40b7a0d0905b 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1149,7 +1149,7 @@ static struct machine *machines__find_for_cpumode(struct machines *machines, machine = machines__find(machines, pid); if 
(!machine) - machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID); + machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID); return machine; } diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 2f901d15e063..afb0c45eba34 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -310,7 +310,16 @@ int perf_stat_process_counter(struct perf_stat_config *config, int i, ret; aggr->val = aggr->ena = aggr->run = 0; - init_stats(ps->res_stats); + + /* + * We calculate counter's data every interval, + * and the display code shows ps->res_stats + * avg value. We need to zero the stats for + * interval mode, otherwise overall avg running + * averages will be shown for each interval. + */ + if (config->interval) + init_stats(ps->res_stats); if (counter->per_pkg) zero_per_pkg(counter); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 3b2de6eb3376..ab02209a7cf3 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1466,7 +1466,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) * Read the build id if possible. This is required for * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work */ - if (filename__read_build_id(dso->name, build_id, BUILD_ID_SIZE) > 0) + if (filename__read_build_id(dso->long_name, build_id, BUILD_ID_SIZE) > 0) dso__set_build_id(dso, build_id); /* diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 8ff7d620d942..33b52eaa39db 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -209,7 +209,7 @@ static const struct flag flags[] = { { "NET_TX_SOFTIRQ", 2 }, { "NET_RX_SOFTIRQ", 3 }, { "BLOCK_SOFTIRQ", 4 }, - { "BLOCK_IOPOLL_SOFTIRQ", 5 }, + { "IRQ_POLL_SOFTIRQ", 5 }, { "TASKLET_SOFTIRQ", 6 }, { "SCHED_SOFTIRQ", 7 }, { "HRTIMER_SOFTIRQ", 8 }, diff --git a/tools/power/acpi/common/cmfsize.c b/tools/power/acpi/common/cmfsize.c index eec688041500..e73a79fce015 100644 --- a/tools/power/acpi/common/cmfsize.c +++ b/tools/power/acpi/common/cmfsize.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2015, Intel Corp. + * Copyright (C) 2000 - 2016, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/tools/power/acpi/common/getopt.c b/tools/power/acpi/common/getopt.c index efefe309367a..0bd343f136a4 100644 --- a/tools/power/acpi/common/getopt.c +++ b/tools/power/acpi/common/getopt.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2015, Intel Corp. + * Copyright (C) 2000 - 2016, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/tools/power/acpi/os_specific/service_layers/oslibcfs.c b/tools/power/acpi/os_specific/service_layers/oslibcfs.c index 6df758302604..11f4aba55aab 100644 --- a/tools/power/acpi/os_specific/service_layers/oslibcfs.c +++ b/tools/power/acpi/os_specific/service_layers/oslibcfs.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2015, Intel Corp. + * Copyright (C) 2000 - 2016, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c index dd5008b0617a..d0e6b857d8d1 100644 --- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c +++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2015, Intel Corp. + * Copyright (C) 2000 - 2016, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/tools/power/acpi/os_specific/service_layers/osunixdir.c b/tools/power/acpi/os_specific/service_layers/osunixdir.c index e153fcb12b1a..66c4badf03e5 100644 --- a/tools/power/acpi/os_specific/service_layers/osunixdir.c +++ b/tools/power/acpi/os_specific/service_layers/osunixdir.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2015, Intel Corp. + * Copyright (C) 2000 - 2016, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/tools/power/acpi/os_specific/service_layers/osunixmap.c b/tools/power/acpi/os_specific/service_layers/osunixmap.c index 44ad4889d468..3818fd07e50f 100644 --- a/tools/power/acpi/os_specific/service_layers/osunixmap.c +++ b/tools/power/acpi/os_specific/service_layers/osunixmap.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2015, Intel Corp. + * Copyright (C) 2000 - 2016, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c index 6858c0893c91..08cb8b2035f2 100644 --- a/tools/power/acpi/os_specific/service_layers/osunixxf.c +++ b/tools/power/acpi/os_specific/service_layers/osunixxf.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2015, Intel Corp. + * Copyright (C) 2000 - 2016, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/tools/power/acpi/tools/acpidump/acpidump.h b/tools/power/acpi/tools/acpidump/acpidump.h index eed534481434..025c232e920d 100644 --- a/tools/power/acpi/tools/acpidump/acpidump.h +++ b/tools/power/acpi/tools/acpidump/acpidump.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2015, Intel Corp. + * Copyright (C) 2000 - 2016, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c index 61d0de804b70..da44458d3b6c 100644 --- a/tools/power/acpi/tools/acpidump/apdump.c +++ b/tools/power/acpi/tools/acpidump/apdump.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2015, Intel Corp. + * Copyright (C) 2000 - 2016, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/tools/power/acpi/tools/acpidump/apfiles.c b/tools/power/acpi/tools/acpidump/apfiles.c index bbdf9e8e25bc..5fcd9700ac18 100644 --- a/tools/power/acpi/tools/acpidump/apfiles.c +++ b/tools/power/acpi/tools/acpidump/apfiles.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2015, Intel Corp. + * Copyright (C) 2000 - 2016, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c index 57620f66ae6c..c3c09152fac6 100644 --- a/tools/power/acpi/tools/acpidump/apmain.c +++ b/tools/power/acpi/tools/acpidump/apmain.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2015, Intel Corp. + * Copyright (C) 2000 - 2016, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c index 8f3f5bb9c74e..590d12a25f6e 100644 --- a/tools/power/cpupower/utils/cpufreq-info.c +++ b/tools/power/cpupower/utils/cpufreq-info.c @@ -10,6 +10,7 @@ #include <errno.h> #include <stdlib.h> #include <string.h> +#include <limits.h> #include <getopt.h> diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c index 7ec7df9e7fc7..0c1a7e65bb81 100644 --- a/tools/testing/nvdimm/test/iomap.c +++ b/tools/testing/nvdimm/test/iomap.c @@ -113,7 +113,7 @@ void *__wrap_devm_memremap_pages(struct device *dev, struct resource *res, } EXPORT_SYMBOL(__wrap_devm_memremap_pages); -pfn_t __wrap_phys_to_pfn_t(dma_addr_t addr, unsigned long flags) +pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags) { struct nfit_test_resource *nfit_res = get_nfit_res(addr); diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index 90bd2ea41032..b3281dcd4a5d 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -217,13 +217,16 @@ static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd, return rc; } +#define NFIT_TEST_ARS_RECORDS 4 + static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd, unsigned int buf_len) { if (buf_len < sizeof(*nd_cmd)) return -EINVAL; - nd_cmd->max_ars_out = 256; + nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status) + + NFIT_TEST_ARS_RECORDS * sizeof(struct nd_ars_record); nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16; return 0; @@ -246,7 +249,8 @@ static int nfit_test_cmd_ars_status(struct nd_cmd_ars_status *nd_cmd, if (buf_len < sizeof(*nd_cmd)) return -EINVAL; - nd_cmd->out_length = 256; + nd_cmd->out_length = sizeof(struct nd_cmd_ars_status); + /* TODO: emit error records */ nd_cmd->num_records = 0; nd_cmd->address = 0; nd_cmd->length = -1ULL; diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh index 77edcdcc016b..057278448515 100755 --- a/tools/testing/selftests/efivarfs/efivarfs.sh +++ b/tools/testing/selftests/efivarfs/efivarfs.sh @@ -88,7 +88,11 @@ test_delete() exit 1 fi - rm $file + rm $file 2>/dev/null + if [ $? 
-ne 0 ]; then + chattr -i $file + rm $file + fi if [ -e $file ]; then echo "$file couldn't be deleted" >&2 @@ -111,6 +115,7 @@ test_zero_size_delete() exit 1 fi + chattr -i $file printf "$attrs" > $file if [ -e $file ]; then @@ -141,7 +146,11 @@ test_valid_filenames() echo "$file could not be created" >&2 ret=1 else - rm $file + rm $file 2>/dev/null + if [ $? -ne 0 ]; then + chattr -i $file + rm $file + fi fi done @@ -174,7 +183,11 @@ test_invalid_filenames() if [ -e $file ]; then echo "Creating $file should have failed" >&2 - rm $file + rm $file 2>/dev/null + if [ $? -ne 0 ]; then + chattr -i $file + rm $file + fi ret=1 fi done diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c index 8c0764407b3c..4af74f733036 100644 --- a/tools/testing/selftests/efivarfs/open-unlink.c +++ b/tools/testing/selftests/efivarfs/open-unlink.c @@ -1,10 +1,68 @@ +#include <errno.h> #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <unistd.h> +#include <sys/ioctl.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> +#include <linux/fs.h> + +static int set_immutable(const char *path, int immutable) +{ + unsigned int flags; + int fd; + int rc; + int error; + + fd = open(path, O_RDONLY); + if (fd < 0) + return fd; + + rc = ioctl(fd, FS_IOC_GETFLAGS, &flags); + if (rc < 0) { + error = errno; + close(fd); + errno = error; + return rc; + } + + if (immutable) + flags |= FS_IMMUTABLE_FL; + else + flags &= ~FS_IMMUTABLE_FL; + + rc = ioctl(fd, FS_IOC_SETFLAGS, &flags); + error = errno; + close(fd); + errno = error; + return rc; +} + +static int get_immutable(const char *path) +{ + unsigned int flags; + int fd; + int rc; + int error; + + fd = open(path, O_RDONLY); + if (fd < 0) + return fd; + + rc = ioctl(fd, FS_IOC_GETFLAGS, &flags); + if (rc < 0) { + error = errno; + close(fd); + errno = error; + return rc; + } + close(fd); + if (flags & FS_IMMUTABLE_FL) + return 1; + return 0; +} int main(int argc, char **argv) { @@ -27,7 +85,7 @@ int main(int argc, char **argv) buf[4] = 0; /* create a test variable */ - fd = open(path, O_WRONLY | O_CREAT); + fd = open(path, O_WRONLY | O_CREAT, 0600); if (fd < 0) { perror("open(O_WRONLY)"); return EXIT_FAILURE; @@ -41,6 +99,18 @@ int main(int argc, char **argv) close(fd); + rc = get_immutable(path); + if (rc < 0) { + perror("ioctl(FS_IOC_GETFLAGS)"); + return EXIT_FAILURE; + } else if (rc) { + rc = set_immutable(path, 0); + if (rc < 0) { + perror("ioctl(FS_IOC_SETFLAGS)"); + return EXIT_FAILURE; + } + } + fd = open(path, O_RDONLY); if (fd < 0) { perror("open"); diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance.tc b/tools/testing/selftests/ftrace/test.d/instances/instance.tc index 773e276ff90b..1e1abe0ad354 100644 --- a/tools/testing/selftests/ftrace/test.d/instances/instance.tc +++ b/tools/testing/selftests/ftrace/test.d/instances/instance.tc @@ -39,28 +39,23 @@ instance_slam() { } instance_slam & -x=`jobs -l` -p1=`echo $x | cut -d' ' -f2` +p1=$! echo $p1 instance_slam & -x=`jobs -l | tail -1` -p2=`echo $x | cut -d' ' -f2` +p2=$! echo $p2 instance_slam & -x=`jobs -l | tail -1` -p3=`echo $x | cut -d' ' -f2` +p3=$! echo $p3 instance_slam & -x=`jobs -l | tail -1` -p4=`echo $x | cut -d' ' -f2` +p4=$! echo $p4 instance_slam & -x=`jobs -l | tail -1` -p5=`echo $x | cut -d' ' -f2` +p5=$! 
echo $p5 ls -lR >/dev/null diff --git a/tools/testing/selftests/lib/Makefile b/tools/testing/selftests/lib/Makefile index 47147b968514..08360060ab14 100644 --- a/tools/testing/selftests/lib/Makefile +++ b/tools/testing/selftests/lib/Makefile @@ -3,6 +3,6 @@ # No binaries, but make sure arg-less "make" doesn't trigger "run_tests" all: -TEST_PROGS := printf.sh +TEST_PROGS := printf.sh bitmap.sh include ../lib.mk diff --git a/tools/testing/selftests/lib/bitmap.sh b/tools/testing/selftests/lib/bitmap.sh new file mode 100755 index 000000000000..2da187b6ddad --- /dev/null +++ b/tools/testing/selftests/lib/bitmap.sh @@ -0,0 +1,10 @@ +#!/bin/sh +# Runs bitmap infrastructure tests using test_bitmap kernel module + +if /sbin/modprobe -q test_bitmap; then + /sbin/modprobe -q -r test_bitmap + echo "bitmap: ok" +else + echo "bitmap: [FAIL]" + exit 1 +fi diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore index 6fb23366b258..69bb3fc38fb2 100644 --- a/tools/testing/selftests/net/.gitignore +++ b/tools/testing/selftests/net/.gitignore @@ -2,3 +2,4 @@ socket psock_fanout psock_tpacket reuseport_bpf +reuseport_bpf_cpu diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 41449b5ad0a9..c658792d47b4 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -4,7 +4,7 @@ CFLAGS = -Wall -O2 -g CFLAGS += -I../../../../usr/include/ -NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf +NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu all: $(NET_PROGS) %: %.c diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c index bec1b5dd2530..96ba386b1b7b 100644 --- a/tools/testing/selftests/net/reuseport_bpf.c +++ b/tools/testing/selftests/net/reuseport_bpf.c @@ -9,10 +9,12 @@ #include <errno.h> #include <error.h> +#include <fcntl.h> #include <linux/bpf.h> #include <linux/filter.h> #include <linux/unistd.h> #include <netinet/in.h> +#include <netinet/tcp.h> #include <stdio.h> #include <stdlib.h> #include <string.h> @@ -169,9 +171,15 @@ static void build_recv_group(const struct test_params p, int fd[], uint16_t mod, if (bind(fd[i], addr, sockaddr_size())) error(1, errno, "failed to bind recv socket %d", i); - if (p.protocol == SOCK_STREAM) + if (p.protocol == SOCK_STREAM) { + opt = 4; + if (setsockopt(fd[i], SOL_TCP, TCP_FASTOPEN, &opt, + sizeof(opt))) + error(1, errno, + "failed to set TCP_FASTOPEN on %d", i); if (listen(fd[i], p.recv_socks * 10)) error(1, errno, "failed to listen on socket"); + } } free(addr); } @@ -189,10 +197,8 @@ static void send_from(struct test_params p, uint16_t sport, char *buf, if (bind(fd, saddr, sockaddr_size())) error(1, errno, "failed to bind send socket"); - if (connect(fd, daddr, sockaddr_size())) - error(1, errno, "failed to connect"); - if (send(fd, buf, len, 0) < 0) + if (sendto(fd, buf, len, MSG_FASTOPEN, daddr, sockaddr_size()) < 0) error(1, errno, "failed to send message"); close(fd); @@ -260,7 +266,7 @@ static void test_recv_order(const struct test_params p, int fd[], int mod) } } -static void test_reuseport_ebpf(const struct test_params p) +static void test_reuseport_ebpf(struct test_params p) { int i, fd[p.recv_socks]; @@ -268,6 +274,7 @@ static void test_reuseport_ebpf(const struct test_params p) build_recv_group(p, fd, p.recv_socks, attach_ebpf); test_recv_order(p, fd, p.recv_socks); + p.send_port_min += p.recv_socks * 2; fprintf(stderr, "Reprograming, testing mod %zd...\n", 
p.recv_socks / 2); attach_ebpf(fd[0], p.recv_socks / 2); test_recv_order(p, fd, p.recv_socks / 2); @@ -276,7 +283,7 @@ static void test_reuseport_ebpf(const struct test_params p) close(fd[i]); } -static void test_reuseport_cbpf(const struct test_params p) +static void test_reuseport_cbpf(struct test_params p) { int i, fd[p.recv_socks]; @@ -284,6 +291,7 @@ static void test_reuseport_cbpf(const struct test_params p) build_recv_group(p, fd, p.recv_socks, attach_cbpf); test_recv_order(p, fd, p.recv_socks); + p.send_port_min += p.recv_socks * 2; fprintf(stderr, "Reprograming, testing mod %zd...\n", p.recv_socks / 2); attach_cbpf(fd[0], p.recv_socks / 2); test_recv_order(p, fd, p.recv_socks / 2); @@ -377,7 +385,7 @@ static void test_filter_no_reuseport(const struct test_params p) static void test_filter_without_bind(void) { - int fd1, fd2; + int fd1, fd2, opt = 1; fprintf(stderr, "Testing filter add without bind...\n"); fd1 = socket(AF_INET, SOCK_DGRAM, 0); @@ -386,6 +394,10 @@ static void test_filter_without_bind(void) fd2 = socket(AF_INET, SOCK_DGRAM, 0); if (fd2 < 0) error(1, errno, "failed to create socket 2"); + if (setsockopt(fd1, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt))) + error(1, errno, "failed to set SO_REUSEPORT on socket 1"); + if (setsockopt(fd2, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt))) + error(1, errno, "failed to set SO_REUSEPORT on socket 2"); attach_ebpf(fd1, 10); attach_cbpf(fd2, 10); @@ -394,6 +406,32 @@ static void test_filter_without_bind(void) close(fd2); } +void enable_fastopen(void) +{ + int fd = open("/proc/sys/net/ipv4/tcp_fastopen", 0); + int rw_mask = 3; /* bit 1: client side; bit-2 server side */ + int val, size; + char buf[16]; + + if (fd < 0) + error(1, errno, "Unable to open tcp_fastopen sysctl"); + if (read(fd, buf, sizeof(buf)) <= 0) + error(1, errno, "Unable to read tcp_fastopen sysctl"); + val = atoi(buf); + close(fd); + + if ((val & rw_mask) != rw_mask) { + fd = open("/proc/sys/net/ipv4/tcp_fastopen", O_RDWR); + if (fd < 0) + error(1, errno, + "Unable to open tcp_fastopen sysctl for writing"); + val |= rw_mask; + size = snprintf(buf, 16, "%d", val); + if (write(fd, buf, size) <= 0) + error(1, errno, "Unable to write tcp_fastopen sysctl"); + close(fd); + } +} int main(void) { @@ -506,6 +544,71 @@ int main(void) .recv_port = 8007, .send_port_min = 9100}); + /* TCP fastopen is required for the TCP tests */ + enable_fastopen(); + fprintf(stderr, "---- IPv4 TCP ----\n"); + test_reuseport_ebpf((struct test_params) { + .recv_family = AF_INET, + .send_family = AF_INET, + .protocol = SOCK_STREAM, + .recv_socks = 10, + .recv_port = 8008, + .send_port_min = 9120}); + test_reuseport_cbpf((struct test_params) { + .recv_family = AF_INET, + .send_family = AF_INET, + .protocol = SOCK_STREAM, + .recv_socks = 10, + .recv_port = 8009, + .send_port_min = 9160}); + test_extra_filter((struct test_params) { + .recv_family = AF_INET, + .protocol = SOCK_STREAM, + .recv_port = 8010}); + test_filter_no_reuseport((struct test_params) { + .recv_family = AF_INET, + .protocol = SOCK_STREAM, + .recv_port = 8011}); + + fprintf(stderr, "---- IPv6 TCP ----\n"); + test_reuseport_ebpf((struct test_params) { + .recv_family = AF_INET6, + .send_family = AF_INET6, + .protocol = SOCK_STREAM, + .recv_socks = 10, + .recv_port = 8012, + .send_port_min = 9200}); + test_reuseport_cbpf((struct test_params) { + .recv_family = AF_INET6, + .send_family = AF_INET6, + .protocol = SOCK_STREAM, + .recv_socks = 10, + .recv_port = 8013, + .send_port_min = 9240}); + test_extra_filter((struct test_params) { 
+ .recv_family = AF_INET6, + .protocol = SOCK_STREAM, + .recv_port = 8014}); + test_filter_no_reuseport((struct test_params) { + .recv_family = AF_INET6, + .protocol = SOCK_STREAM, + .recv_port = 8015}); + + fprintf(stderr, "---- IPv6 TCP w/ mapped IPv4 ----\n"); + test_reuseport_ebpf((struct test_params) { + .recv_family = AF_INET6, + .send_family = AF_INET, + .protocol = SOCK_STREAM, + .recv_socks = 10, + .recv_port = 8016, + .send_port_min = 9320}); + test_reuseport_cbpf((struct test_params) { + .recv_family = AF_INET6, + .send_family = AF_INET, + .protocol = SOCK_STREAM, + .recv_socks = 10, + .recv_port = 8017, + .send_port_min = 9360}); test_filter_without_bind(); diff --git a/tools/testing/selftests/net/reuseport_bpf_cpu.c b/tools/testing/selftests/net/reuseport_bpf_cpu.c new file mode 100644 index 000000000000..b23d6f54de7b --- /dev/null +++ b/tools/testing/selftests/net/reuseport_bpf_cpu.c @@ -0,0 +1,258 @@ +/* + * Test functionality of BPF filters with SO_REUSEPORT. This program creates + * an SO_REUSEPORT receiver group containing one socket per CPU core. It then + * creates a BPF program that will select a socket from this group based + * on the core id that receives the packet. The sending code artificially + * moves itself to run on different core ids and sends one message from + * each core. Since these packets are delivered over loopback, they should + * arrive on the same core that sent them. The receiving code then ensures + * that the packet was received on the socket for the corresponding core id. + * This entire process is done for several different core id permutations + * and for each IPv4/IPv6 and TCP/UDP combination. + */ + +#define _GNU_SOURCE + +#include <arpa/inet.h> +#include <errno.h> +#include <error.h> +#include <linux/filter.h> +#include <linux/in.h> +#include <linux/unistd.h> +#include <sched.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/epoll.h> +#include <sys/types.h> +#include <sys/socket.h> +#include <unistd.h> + +static const int PORT = 8888; + +static void build_rcv_group(int *rcv_fd, size_t len, int family, int proto) +{ + struct sockaddr_storage addr; + struct sockaddr_in *addr4; + struct sockaddr_in6 *addr6; + size_t i; + int opt; + + switch (family) { + case AF_INET: + addr4 = (struct sockaddr_in *)&addr; + addr4->sin_family = AF_INET; + addr4->sin_addr.s_addr = htonl(INADDR_ANY); + addr4->sin_port = htons(PORT); + break; + case AF_INET6: + addr6 = (struct sockaddr_in6 *)&addr; + addr6->sin6_family = AF_INET6; + addr6->sin6_addr = in6addr_any; + addr6->sin6_port = htons(PORT); + break; + default: + error(1, 0, "Unsupported family %d", family); + } + + for (i = 0; i < len; ++i) { + rcv_fd[i] = socket(family, proto, 0); + if (rcv_fd[i] < 0) + error(1, errno, "failed to create receive socket"); + + opt = 1; + if (setsockopt(rcv_fd[i], SOL_SOCKET, SO_REUSEPORT, &opt, + sizeof(opt))) + error(1, errno, "failed to set SO_REUSEPORT"); + + if (bind(rcv_fd[i], (struct sockaddr *)&addr, sizeof(addr))) + error(1, errno, "failed to bind receive socket"); + + if (proto == SOCK_STREAM && listen(rcv_fd[i], len * 10)) + error(1, errno, "failed to listen on receive port"); + } +} + +static void attach_bpf(int fd) +{ + struct sock_filter code[] = { + /* A = raw_smp_processor_id() */ + { BPF_LD | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_CPU }, + /* return A */ + { BPF_RET | BPF_A, 0, 0, 0 }, + }; + struct sock_fprog p = { + .len = 2, + .filter = code, + }; + + if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF, &p, sizeof(p))) 
+ error(1, errno, "failed to set SO_ATTACH_REUSEPORT_CBPF"); +} + +static void send_from_cpu(int cpu_id, int family, int proto) +{ + struct sockaddr_storage saddr, daddr; + struct sockaddr_in *saddr4, *daddr4; + struct sockaddr_in6 *saddr6, *daddr6; + cpu_set_t cpu_set; + int fd; + + switch (family) { + case AF_INET: + saddr4 = (struct sockaddr_in *)&saddr; + saddr4->sin_family = AF_INET; + saddr4->sin_addr.s_addr = htonl(INADDR_ANY); + saddr4->sin_port = 0; + + daddr4 = (struct sockaddr_in *)&daddr; + daddr4->sin_family = AF_INET; + daddr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK); + daddr4->sin_port = htons(PORT); + break; + case AF_INET6: + saddr6 = (struct sockaddr_in6 *)&saddr; + saddr6->sin6_family = AF_INET6; + saddr6->sin6_addr = in6addr_any; + saddr6->sin6_port = 0; + + daddr6 = (struct sockaddr_in6 *)&daddr; + daddr6->sin6_family = AF_INET6; + daddr6->sin6_addr = in6addr_loopback; + daddr6->sin6_port = htons(PORT); + break; + default: + error(1, 0, "Unsupported family %d", family); + } + + memset(&cpu_set, 0, sizeof(cpu_set)); + CPU_SET(cpu_id, &cpu_set); + if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) + error(1, errno, "failed to pin to cpu"); + + fd = socket(family, proto, 0); + if (fd < 0) + error(1, errno, "failed to create send socket"); + + if (bind(fd, (struct sockaddr *)&saddr, sizeof(saddr))) + error(1, errno, "failed to bind send socket"); + + if (connect(fd, (struct sockaddr *)&daddr, sizeof(daddr))) + error(1, errno, "failed to connect send socket"); + + if (send(fd, "a", 1, 0) < 0) + error(1, errno, "failed to send message"); + + close(fd); +} + +static +void receive_on_cpu(int *rcv_fd, int len, int epfd, int cpu_id, int proto) +{ + struct epoll_event ev; + int i, fd; + char buf[8]; + + i = epoll_wait(epfd, &ev, 1, -1); + if (i < 0) + error(1, errno, "epoll_wait failed"); + + if (proto == SOCK_STREAM) { + fd = accept(ev.data.fd, NULL, NULL); + if (fd < 0) + error(1, errno, "failed to accept"); + i = recv(fd, buf, sizeof(buf), 0); + close(fd); + } else { + i = recv(ev.data.fd, buf, sizeof(buf), 0); + } + + if (i < 0) + error(1, errno, "failed to recv"); + + for (i = 0; i < len; ++i) + if (ev.data.fd == rcv_fd[i]) + break; + if (i == len) + error(1, 0, "failed to find socket"); + fprintf(stderr, "send cpu %d, receive socket %d\n", cpu_id, i); + if (cpu_id != i) + error(1, 0, "cpu id/receive socket mismatch"); +} + +static void test(int *rcv_fd, int len, int family, int proto) +{ + struct epoll_event ev; + int epfd, cpu; + + build_rcv_group(rcv_fd, len, family, proto); + attach_bpf(rcv_fd[0]); + + epfd = epoll_create(1); + if (epfd < 0) + error(1, errno, "failed to create epoll"); + for (cpu = 0; cpu < len; ++cpu) { + ev.events = EPOLLIN; + ev.data.fd = rcv_fd[cpu]; + if (epoll_ctl(epfd, EPOLL_CTL_ADD, rcv_fd[cpu], &ev)) + error(1, errno, "failed to register sock epoll"); + } + + /* Forward iterate */ + for (cpu = 0; cpu < len; ++cpu) { + send_from_cpu(cpu, family, proto); + receive_on_cpu(rcv_fd, len, epfd, cpu, proto); + } + + /* Reverse iterate */ + for (cpu = len - 1; cpu >= 0; --cpu) { + send_from_cpu(cpu, family, proto); + receive_on_cpu(rcv_fd, len, epfd, cpu, proto); + } + + /* Even cores */ + for (cpu = 0; cpu < len; cpu += 2) { + send_from_cpu(cpu, family, proto); + receive_on_cpu(rcv_fd, len, epfd, cpu, proto); + } + + /* Odd cores */ + for (cpu = 1; cpu < len; cpu += 2) { + send_from_cpu(cpu, family, proto); + receive_on_cpu(rcv_fd, len, epfd, cpu, proto); + } + + close(epfd); + for (cpu = 0; cpu < len; ++cpu) + close(rcv_fd[cpu]); +} + +int 
main(void) +{ + int *rcv_fd, cpus; + + cpus = sysconf(_SC_NPROCESSORS_ONLN); + if (cpus <= 0) + error(1, errno, "failed counting cpus"); + + rcv_fd = calloc(cpus, sizeof(int)); + if (!rcv_fd) + error(1, 0, "failed to allocate array"); + + fprintf(stderr, "---- IPv4 UDP ----\n"); + test(rcv_fd, cpus, AF_INET, SOCK_DGRAM); + + fprintf(stderr, "---- IPv6 UDP ----\n"); + test(rcv_fd, cpus, AF_INET6, SOCK_DGRAM); + + fprintf(stderr, "---- IPv4 TCP ----\n"); + test(rcv_fd, cpus, AF_INET, SOCK_STREAM); + + fprintf(stderr, "---- IPv6 TCP ----\n"); + test(rcv_fd, cpus, AF_INET6, SOCK_STREAM); + + free(rcv_fd); + + fprintf(stderr, "SUCCESS\n"); + return 0; +} diff --git a/tools/testing/selftests/timers/valid-adjtimex.c b/tools/testing/selftests/timers/valid-adjtimex.c index e86d937cc22c..60fe3c569bd9 100644 --- a/tools/testing/selftests/timers/valid-adjtimex.c +++ b/tools/testing/selftests/timers/valid-adjtimex.c @@ -45,7 +45,17 @@ static inline int ksft_exit_fail(void) } #endif -#define NSEC_PER_SEC 1000000000L +#define NSEC_PER_SEC 1000000000LL +#define USEC_PER_SEC 1000000LL + +#define ADJ_SETOFFSET 0x0100 + +#include <sys/syscall.h> +static int clock_adjtime(clockid_t id, struct timex *tx) +{ + return syscall(__NR_clock_adjtime, id, tx); +} + /* clear NTP time_status & time_state */ int clear_time_state(void) @@ -193,10 +203,137 @@ out: } +int set_offset(long long offset, int use_nano) +{ + struct timex tmx = {}; + int ret; + + tmx.modes = ADJ_SETOFFSET; + if (use_nano) { + tmx.modes |= ADJ_NANO; + + tmx.time.tv_sec = offset / NSEC_PER_SEC; + tmx.time.tv_usec = offset % NSEC_PER_SEC; + + if (offset < 0 && tmx.time.tv_usec) { + tmx.time.tv_sec -= 1; + tmx.time.tv_usec += NSEC_PER_SEC; + } + } else { + tmx.time.tv_sec = offset / USEC_PER_SEC; + tmx.time.tv_usec = offset % USEC_PER_SEC; + + if (offset < 0 && tmx.time.tv_usec) { + tmx.time.tv_sec -= 1; + tmx.time.tv_usec += USEC_PER_SEC; + } + } + + ret = clock_adjtime(CLOCK_REALTIME, &tmx); + if (ret < 0) { + printf("(sec: %ld usec: %ld) ", tmx.time.tv_sec, tmx.time.tv_usec); + printf("[FAIL]\n"); + return -1; + } + return 0; +} + +int set_bad_offset(long sec, long usec, int use_nano) +{ + struct timex tmx = {}; + int ret; + + tmx.modes = ADJ_SETOFFSET; + if (use_nano) + tmx.modes |= ADJ_NANO; + + tmx.time.tv_sec = sec; + tmx.time.tv_usec = usec; + ret = clock_adjtime(CLOCK_REALTIME, &tmx); + if (ret >= 0) { + printf("Invalid (sec: %ld usec: %ld) did not fail! ", tmx.time.tv_sec, tmx.time.tv_usec); + printf("[FAIL]\n"); + return -1; + } + return 0; +} + +int validate_set_offset(void) +{ + printf("Testing ADJ_SETOFFSET... 
"); + + /* Test valid values */ + if (set_offset(NSEC_PER_SEC - 1, 1)) + return -1; + + if (set_offset(-NSEC_PER_SEC + 1, 1)) + return -1; + + if (set_offset(-NSEC_PER_SEC - 1, 1)) + return -1; + + if (set_offset(5 * NSEC_PER_SEC, 1)) + return -1; + + if (set_offset(-5 * NSEC_PER_SEC, 1)) + return -1; + + if (set_offset(5 * NSEC_PER_SEC + NSEC_PER_SEC / 2, 1)) + return -1; + + if (set_offset(-5 * NSEC_PER_SEC - NSEC_PER_SEC / 2, 1)) + return -1; + + if (set_offset(USEC_PER_SEC - 1, 0)) + return -1; + + if (set_offset(-USEC_PER_SEC + 1, 0)) + return -1; + + if (set_offset(-USEC_PER_SEC - 1, 0)) + return -1; + + if (set_offset(5 * USEC_PER_SEC, 0)) + return -1; + + if (set_offset(-5 * USEC_PER_SEC, 0)) + return -1; + + if (set_offset(5 * USEC_PER_SEC + USEC_PER_SEC / 2, 0)) + return -1; + + if (set_offset(-5 * USEC_PER_SEC - USEC_PER_SEC / 2, 0)) + return -1; + + /* Test invalid values */ + if (set_bad_offset(0, -1, 1)) + return -1; + if (set_bad_offset(0, -1, 0)) + return -1; + if (set_bad_offset(0, 2 * NSEC_PER_SEC, 1)) + return -1; + if (set_bad_offset(0, 2 * USEC_PER_SEC, 0)) + return -1; + if (set_bad_offset(0, NSEC_PER_SEC, 1)) + return -1; + if (set_bad_offset(0, USEC_PER_SEC, 0)) + return -1; + if (set_bad_offset(0, -NSEC_PER_SEC, 1)) + return -1; + if (set_bad_offset(0, -USEC_PER_SEC, 0)) + return -1; + + printf("[OK]\n"); + return 0; +} + int main(int argc, char **argv) { if (validate_freq()) return ksft_exit_fail(); + if (validate_set_offset()) + return ksft_exit_fail(); + return ksft_exit_pass(); } diff --git a/tools/virtio/asm/barrier.h b/tools/virtio/asm/barrier.h index 26b7926bda88..ba34f9e96efd 100644 --- a/tools/virtio/asm/barrier.h +++ b/tools/virtio/asm/barrier.h @@ -1,15 +1,19 @@ #if defined(__i386__) || defined(__x86_64__) #define barrier() asm volatile("" ::: "memory") -#define mb() __sync_synchronize() - -#define smp_mb() mb() -# define dma_rmb() barrier() -# define dma_wmb() barrier() -# define smp_rmb() barrier() -# define smp_wmb() barrier() +#define virt_mb() __sync_synchronize() +#define virt_rmb() barrier() +#define virt_wmb() barrier() +/* Atomic store should be enough, but gcc generates worse code in that case. */ +#define virt_store_mb(var, value) do { \ + typeof(var) virt_store_mb_value = (value); \ + __atomic_exchange(&(var), &virt_store_mb_value, &virt_store_mb_value, \ + __ATOMIC_SEQ_CST); \ + barrier(); \ +} while (0); /* Weak barriers should be used. 
If not - it's a bug */ -# define rmb() abort() -# define wmb() abort() +# define mb() abort() +# define rmb() abort() +# define wmb() abort() #else #error Please fill in barrier macros #endif diff --git a/tools/virtio/linux/compiler.h b/tools/virtio/linux/compiler.h new file mode 100644 index 000000000000..845960e1cbf2 --- /dev/null +++ b/tools/virtio/linux/compiler.h @@ -0,0 +1,9 @@ +#ifndef LINUX_COMPILER_H +#define LINUX_COMPILER_H + +#define WRITE_ONCE(var, val) \ + (*((volatile typeof(val) *)(&(var))) = (val)) + +#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var)))) + +#endif diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h index 4db7d5691ba7..033849948215 100644 --- a/tools/virtio/linux/kernel.h +++ b/tools/virtio/linux/kernel.h @@ -8,6 +8,7 @@ #include <assert.h> #include <stdarg.h> +#include <linux/compiler.h> #include <linux/types.h> #include <linux/printk.h> #include <linux/bug.h> diff --git a/tools/virtio/ringtest/Makefile b/tools/virtio/ringtest/Makefile new file mode 100644 index 000000000000..feaa64ac4630 --- /dev/null +++ b/tools/virtio/ringtest/Makefile @@ -0,0 +1,22 @@ +all: + +all: ring virtio_ring_0_9 virtio_ring_poll + +CFLAGS += -Wall +CFLAGS += -pthread -O2 -ggdb +LDFLAGS += -pthread -O2 -ggdb + +main.o: main.c main.h +ring.o: ring.c main.h +virtio_ring_0_9.o: virtio_ring_0_9.c main.h +virtio_ring_poll.o: virtio_ring_poll.c virtio_ring_0_9.c main.h +ring: ring.o main.o +virtio_ring_0_9: virtio_ring_0_9.o main.o +virtio_ring_poll: virtio_ring_poll.o main.o +clean: + -rm main.o + -rm ring.o ring + -rm virtio_ring_0_9.o virtio_ring_0_9 + -rm virtio_ring_poll.o virtio_ring_poll + +.PHONY: all clean diff --git a/tools/virtio/ringtest/README b/tools/virtio/ringtest/README new file mode 100644 index 000000000000..34e94c46104f --- /dev/null +++ b/tools/virtio/ringtest/README @@ -0,0 +1,2 @@ +Partial implementation of various ring layouts, useful to tune virtio design. +Uses shared memory heavily. diff --git a/tools/virtio/ringtest/main.c b/tools/virtio/ringtest/main.c new file mode 100644 index 000000000000..3a5ff438bd62 --- /dev/null +++ b/tools/virtio/ringtest/main.c @@ -0,0 +1,366 @@ +/* + * Copyright (C) 2016 Red Hat, Inc. + * Author: Michael S. Tsirkin <mst@redhat.com> + * This work is licensed under the terms of the GNU GPL, version 2. + * + * Command line processing and common functions for ring benchmarking.
+ */ +#define _GNU_SOURCE +#include <getopt.h> +#include <pthread.h> +#include <assert.h> +#include <sched.h> +#include "main.h" +#include <sys/eventfd.h> +#include <stdlib.h> +#include <stdio.h> +#include <unistd.h> +#include <limits.h> + +int runcycles = 10000000; +int max_outstanding = INT_MAX; +int batch = 1; + +bool do_sleep = false; +bool do_relax = false; +bool do_exit = true; + +unsigned ring_size = 256; + +static int kickfd = -1; +static int callfd = -1; + +void notify(int fd) +{ + unsigned long long v = 1; + int r; + + vmexit(); + r = write(fd, &v, sizeof v); + assert(r == sizeof v); + vmentry(); +} + +void wait_for_notify(int fd) +{ + unsigned long long v = 1; + int r; + + vmexit(); + r = read(fd, &v, sizeof v); + assert(r == sizeof v); + vmentry(); +} + +void kick(void) +{ + notify(kickfd); +} + +void wait_for_kick(void) +{ + wait_for_notify(kickfd); +} + +void call(void) +{ + notify(callfd); +} + +void wait_for_call(void) +{ + wait_for_notify(callfd); +} + +void set_affinity(const char *arg) +{ + cpu_set_t cpuset; + int ret; + pthread_t self; + long int cpu; + char *endptr; + + if (!arg) + return; + + cpu = strtol(arg, &endptr, 0); + assert(!*endptr); + + assert(cpu >= 0 && cpu < CPU_SETSIZE); + + self = pthread_self(); + CPU_ZERO(&cpuset); + CPU_SET(cpu, &cpuset); + + ret = pthread_setaffinity_np(self, sizeof(cpu_set_t), &cpuset); + assert(!ret); +} + +static void run_guest(void) +{ + int completed_before; + int completed = 0; + int started = 0; + int bufs = runcycles; + int spurious = 0; + int r; + unsigned len; + void *buf; + int tokick = batch; + + for (;;) { + if (do_sleep) + disable_call(); + completed_before = completed; + do { + if (started < bufs && + started - completed < max_outstanding) { + r = add_inbuf(0, NULL, "Hello, world!"); + if (__builtin_expect(r == 0, true)) { + ++started; + if (!--tokick) { + tokick = batch; + if (do_sleep) + kick_available(); + } + + } + } else + r = -1; + + /* Flush out completed bufs if any */ + if (get_buf(&len, &buf)) { + ++completed; + if (__builtin_expect(completed == bufs, false)) + return; + r = 0; + } + } while (r == 0); + if (completed == completed_before) + ++spurious; + assert(completed <= bufs); + assert(started <= bufs); + if (do_sleep) { + if (enable_call()) + wait_for_call(); + } else { + poll_used(); + } + } +} + +static void run_host(void) +{ + int completed_before; + int completed = 0; + int spurious = 0; + int bufs = runcycles; + unsigned len; + void *buf; + + for (;;) { + if (do_sleep) { + if (enable_kick()) + wait_for_kick(); + } else { + poll_avail(); + } + if (do_sleep) + disable_kick(); + completed_before = completed; + while (__builtin_expect(use_buf(&len, &buf), true)) { + if (do_sleep) + call_used(); + ++completed; + if (__builtin_expect(completed == bufs, false)) + return; + } + if (completed == completed_before) + ++spurious; + assert(completed <= bufs); + if (completed == bufs) + break; + } +} + +void *start_guest(void *arg) +{ + set_affinity(arg); + run_guest(); + pthread_exit(NULL); +} + +void *start_host(void *arg) +{ + set_affinity(arg); + run_host(); + pthread_exit(NULL); +} + +static const char optstring[] = ""; +static const struct option longopts[] = { + { + .name = "help", + .has_arg = no_argument, + .val = 'h', + }, + { + .name = "host-affinity", + .has_arg = required_argument, + .val = 'H', + }, + { + .name = "guest-affinity", + .has_arg = required_argument, + .val = 'G', + }, + { + .name = "ring-size", + .has_arg = required_argument, + .val = 'R', + }, + { + .name = "run-cycles", + .has_arg = 
required_argument, + .val = 'C', + }, + { + .name = "outstanding", + .has_arg = required_argument, + .val = 'o', + }, + { + .name = "batch", + .has_arg = required_argument, + .val = 'b', + }, + { + .name = "sleep", + .has_arg = no_argument, + .val = 's', + }, + { + .name = "relax", + .has_arg = no_argument, + .val = 'x', + }, + { + .name = "exit", + .has_arg = no_argument, + .val = 'e', + }, + { + } +}; + +static void help(void) +{ + fprintf(stderr, "Usage: <test> [--help]" + " [--host-affinity H]" + " [--guest-affinity G]" + " [--ring-size R (default: %d)]" + " [--run-cycles C (default: %d)]" + " [--batch b]" + " [--outstanding o]" + " [--sleep]" + " [--relax]" + " [--exit]" + "\n", + ring_size, + runcycles); +} + +int main(int argc, char **argv) +{ + int ret; + pthread_t host, guest; + void *tret; + char *host_arg = NULL; + char *guest_arg = NULL; + char *endptr; + long int c; + + kickfd = eventfd(0, 0); + assert(kickfd >= 0); + callfd = eventfd(0, 0); + assert(callfd >= 0); + + for (;;) { + int o = getopt_long(argc, argv, optstring, longopts, NULL); + switch (o) { + case -1: + goto done; + case '?': + help(); + exit(2); + case 'H': + host_arg = optarg; + break; + case 'G': + guest_arg = optarg; + break; + case 'R': + ring_size = strtol(optarg, &endptr, 0); + assert(ring_size && !(ring_size & (ring_size - 1))); + assert(!*endptr); + break; + case 'C': + c = strtol(optarg, &endptr, 0); + assert(!*endptr); + assert(c > 0 && c < INT_MAX); + runcycles = c; + break; + case 'o': + c = strtol(optarg, &endptr, 0); + assert(!*endptr); + assert(c > 0 && c < INT_MAX); + max_outstanding = c; + break; + case 'b': + c = strtol(optarg, &endptr, 0); + assert(!*endptr); + assert(c > 0 && c < INT_MAX); + batch = c; + break; + case 's': + do_sleep = true; + break; + case 'x': + do_relax = true; + break; + case 'e': + do_exit = true; + break; + default: + help(); + exit(4); + break; + } + } + + /* does nothing here, used to make sure all smp APIs compile */ + smp_acquire(); + smp_release(); + smp_mb(); +done: + + if (batch > max_outstanding) + batch = max_outstanding; + + if (optind < argc) { + help(); + exit(4); + } + alloc_ring(); + + ret = pthread_create(&host, NULL, start_host, host_arg); + assert(!ret); + ret = pthread_create(&guest, NULL, start_guest, guest_arg); + assert(!ret); + + ret = pthread_join(guest, &tret); + assert(!ret); + ret = pthread_join(host, &tret); + assert(!ret); + return 0; +} diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h new file mode 100644 index 000000000000..16917acb0ade --- /dev/null +++ b/tools/virtio/ringtest/main.h @@ -0,0 +1,119 @@ +/* + * Copyright (C) 2016 Red Hat, Inc. + * Author: Michael S. Tsirkin <mst@redhat.com> + * This work is licensed under the terms of the GNU GPL, version 2. + * + * Common macros and functions for ring benchmarking. 
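Before main.h itself, a sketch of the contract it declares: main.c above provides the harness (threads, eventfd signalling, option parsing) and each ring implementation provides the hooks. The hypothetical "null" backend below, which is not part of the patch, shows the full set of hooks a backend must implement; linking it against main.o would measure little beyond harness overhead:

	/* Hypothetical null backend: every hook is a stub. */
	#include "main.h"
	#include <stddef.h>

	void alloc_ring(void) {}

	/* guest side */
	int add_inbuf(unsigned len, void *buf, void *data)
	{
		(void)len; (void)buf; (void)data;
		return 0;			/* always accepts a buffer */
	}
	void *get_buf(unsigned *lenp, void **bufp)
	{
		*lenp = 0;
		*bufp = NULL;
		return (void *)1;		/* any non-NULL token: "one completed" */
	}
	void poll_used(void) {}
	void disable_call(void) {}
	bool enable_call(void) { return false; }	/* never sleep */
	void kick_available(void) {}

	/* host side */
	void disable_kick(void) {}
	bool enable_kick(void) { return false; }	/* never sleep */
	void poll_avail(void) {}
	bool use_buf(unsigned *lenp, void **bufp)
	{
		*lenp = 0;
		*bufp = NULL;
		return true;			/* always "consumes" a buffer */
	}
	void call_used(void) {}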
+ */ +#ifndef MAIN_H +#define MAIN_H + +#include <stdbool.h> + +extern bool do_exit; + +#if defined(__x86_64__) || defined(__i386__) +#include "x86intrin.h" + +static inline void wait_cycles(unsigned long long cycles) +{ + unsigned long long t; + + t = __rdtsc(); + while (__rdtsc() - t < cycles) {} +} + +#define VMEXIT_CYCLES 500 +#define VMENTRY_CYCLES 500 + +#else +static inline void wait_cycles(unsigned long long cycles) +{ + _Exit(5); +} +#define VMEXIT_CYCLES 0 +#define VMENTRY_CYCLES 0 +#endif + +static inline void vmexit(void) +{ + if (!do_exit) + return; + + wait_cycles(VMEXIT_CYCLES); +} +static inline void vmentry(void) +{ + if (!do_exit) + return; + + wait_cycles(VMENTRY_CYCLES); +} + +/* implemented by ring */ +void alloc_ring(void); +/* guest side */ +int add_inbuf(unsigned, void *, void *); +void *get_buf(unsigned *, void **); +void disable_call(); +bool enable_call(); +void kick_available(); +void poll_used(); +/* host side */ +void disable_kick(); +bool enable_kick(); +bool use_buf(unsigned *, void **); +void call_used(); +void poll_avail(); + +/* implemented by main */ +extern bool do_sleep; +void kick(void); +void wait_for_kick(void); +void call(void); +void wait_for_call(void); + +extern unsigned ring_size; + +/* Compiler barrier - similar to what Linux uses */ +#define barrier() asm volatile("" ::: "memory") + +/* Is there a portable way to do this? */ +#if defined(__x86_64__) || defined(__i386__) +#define cpu_relax() asm ("rep; nop" ::: "memory") +#else +#define cpu_relax() assert(0) +#endif + +extern bool do_relax; + +static inline void busy_wait(void) +{ + if (do_relax) + cpu_relax(); + else + /* prevent compiler from removing busy loops */ + barrier(); +} + +/* + * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized + * with other __ATOMIC_SEQ_CST calls. + */ +#define smp_mb() __sync_synchronize() + +/* + * This abuses the atomic builtins for thread fences, and + * adds a compiler barrier. + */ +#define smp_release() do { \ + barrier(); \ + __atomic_thread_fence(__ATOMIC_RELEASE); \ +} while (0) + +#define smp_acquire() do { \ + __atomic_thread_fence(__ATOMIC_ACQUIRE); \ + barrier(); \ +} while (0) + +#endif diff --git a/tools/virtio/ringtest/ring.c b/tools/virtio/ringtest/ring.c new file mode 100644 index 000000000000..c25c8d248b6b --- /dev/null +++ b/tools/virtio/ringtest/ring.c @@ -0,0 +1,272 @@ +/* + * Copyright (C) 2016 Red Hat, Inc. + * Author: Michael S. Tsirkin <mst@redhat.com> + * This work is licensed under the terms of the GNU GPL, version 2. + * + * Simple descriptor-based ring. virtio 0.9 compatible event index is used for + * signalling, unconditionally. + */ +#define _GNU_SOURCE +#include "main.h" +#include <stdlib.h> +#include <stdio.h> +#include <string.h> + +/* Next - Where next entry will be written. + * Prev - "Next" value when event triggered previously. + * Event - Peer requested event after writing this entry. + */ +static inline bool need_event(unsigned short event, + unsigned short next, + unsigned short prev) +{ + return (unsigned short)(next - event - 1) < (unsigned short)(next - prev); +} + +/* Design: + * Guest adds descriptors with unique index values and DESC_HW in flags. + * Host overwrites used descriptors with correct len, index, and DESC_HW clear. + * Flags are always set last. 
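The need_event() window check above is easiest to see with numbers. A standalone sketch with illustrative values, not part of the patch:

	#include <assert.h>
	#include <stdbool.h>

	/* Copy of need_event() from ring.c, for a standalone check. */
	static bool need_event(unsigned short event,
			       unsigned short next,
			       unsigned short prev)
	{
		return (unsigned short)(next - event - 1) <
		       (unsigned short)(next - prev);
	}

	int main(void)
	{
		/* The peer asked to be notified after entry 7 was written.
		 * We last notified when next was 5 and have advanced next
		 * to 9, so entry 7 was written in between: notify.
		 */
		assert(need_event(7, 9, 5));

		/* Event index 9 means "after entry 9", but next == 9 says
		 * entry 9 is only about to be written: no notification yet.
		 */
		assert(!need_event(9, 9, 5));

		/* The unsigned short arithmetic handles index wrap-around. */
		assert(need_event(0, 2, 65534));
		return 0;
	}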
+ */ +#define DESC_HW 0x1 + +struct desc { + unsigned short flags; + unsigned short index; + unsigned len; + unsigned long long addr; +}; + +/* how much padding is needed to avoid false cache sharing */ +#define HOST_GUEST_PADDING 0x80 + +/* Mostly read */ +struct event { + unsigned short kick_index; + unsigned char reserved0[HOST_GUEST_PADDING - 2]; + unsigned short call_index; + unsigned char reserved1[HOST_GUEST_PADDING - 2]; +}; + +struct data { + void *buf; /* descriptor is writeable, we can't get buf from there */ + void *data; +} *data; + +struct desc *ring; +struct event *event; + +struct guest { + unsigned avail_idx; + unsigned last_used_idx; + unsigned num_free; + unsigned kicked_avail_idx; + unsigned char reserved[HOST_GUEST_PADDING - 12]; +} guest; + +struct host { + /* we do not need to track last avail index + * unless we have more than one in flight. + */ + unsigned used_idx; + unsigned called_used_idx; + unsigned char reserved[HOST_GUEST_PADDING - 4]; +} host; + +/* implemented by ring */ +void alloc_ring(void) +{ + int ret; + int i; + + ret = posix_memalign((void **)&ring, 0x1000, ring_size * sizeof *ring); + if (ret) { + perror("Unable to allocate ring buffer.\n"); + exit(3); + } + event = malloc(sizeof *event); + if (!event) { + perror("Unable to allocate event buffer.\n"); + exit(3); + } + memset(event, 0, sizeof *event); + guest.avail_idx = 0; + guest.kicked_avail_idx = -1; + guest.last_used_idx = 0; + host.used_idx = 0; + host.called_used_idx = -1; + for (i = 0; i < ring_size; ++i) { + struct desc desc = { + .index = i, + }; + ring[i] = desc; + } + guest.num_free = ring_size; + data = malloc(ring_size * sizeof *data); + if (!data) { + perror("Unable to allocate data buffer.\n"); + exit(3); + } + memset(data, 0, ring_size * sizeof *data); +} + +/* guest side */ +int add_inbuf(unsigned len, void *buf, void *datap) +{ + unsigned head, index; + + if (!guest.num_free) + return -1; + + guest.num_free--; + head = (ring_size - 1) & (guest.avail_idx++); + + /* Start with a write. On MESI architectures this helps + * avoid a shared state with consumer that is polling this descriptor. + */ + ring[head].addr = (unsigned long)(void*)buf; + ring[head].len = len; + /* read below might bypass write above. That is OK because it's just an + * optimization. If this happens, we will get the cache line in a + * shared state which is unfortunate, but probably not worth it to + * add an explicit full barrier to avoid this. + */ + barrier(); + index = ring[head].index; + data[index].buf = buf; + data[index].data = datap; + /* Barrier A (for pairing) */ + smp_release(); + ring[head].flags = DESC_HW; + + return 0; +} + +void *get_buf(unsigned *lenp, void **bufp) +{ + unsigned head = (ring_size - 1) & guest.last_used_idx; + unsigned index; + void *datap; + + if (ring[head].flags & DESC_HW) + return NULL; + /* Barrier B (for pairing) */ + smp_acquire(); + *lenp = ring[head].len; + index = ring[head].index & (ring_size - 1); + datap = data[index].data; + *bufp = data[index].buf; + data[index].buf = NULL; + data[index].data = NULL; + guest.num_free++; + guest.last_used_idx++; + return datap; +} + +void poll_used(void) +{ + unsigned head = (ring_size - 1) & guest.last_used_idx; + + while (ring[head].flags & DESC_HW) + busy_wait(); +} + +void disable_call() +{ + /* Doing nothing to disable calls might cause + * extra interrupts, but reduces the number of cache misses. 
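The Barrier A / Barrier B comments in add_inbuf() and get_buf() above mark a classic release/acquire handoff. A minimal two-thread sketch using the smp_release()/smp_acquire() macros from main.h (hypothetical and simplified, not part of the patch; it assumes a single producer and a single consumer sharing one handoff struct):

	#include "main.h"

	struct handoff {
		unsigned len;			/* payload */
		unsigned short flags;		/* 0x1 plays the role of DESC_HW */
	};
	static struct handoff d;

	static void producer(void)		/* guest side, cf. add_inbuf() */
	{
		d.len = 42;			/* payload first ... */
		smp_release();			/* Barrier A: payload before flags */
		d.flags = 0x1;			/* ... then hand ownership over */
	}

	static unsigned consumer(void)		/* host side, cf. use_buf() */
	{
		while (!(d.flags & 0x1))
			busy_wait();
		smp_acquire();			/* Barrier B: flags before payload */
		return d.len;			/* guaranteed to observe 42 */
	}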
+ */ +} + +bool enable_call() +{ + unsigned head = (ring_size - 1) & guest.last_used_idx; + + event->call_index = guest.last_used_idx; + /* Flush call index write */ + /* Barrier D (for pairing) */ + smp_mb(); + return ring[head].flags & DESC_HW; +} + +void kick_available(void) +{ + /* Flush in previous flags write */ + /* Barrier C (for pairing) */ + smp_mb(); + if (!need_event(event->kick_index, + guest.avail_idx, + guest.kicked_avail_idx)) + return; + + guest.kicked_avail_idx = guest.avail_idx; + kick(); +} + +/* host side */ +void disable_kick() +{ + /* Doing nothing to disable kicks might cause + * extra interrupts, but reduces the number of cache misses. + */ +} + +bool enable_kick() +{ + unsigned head = (ring_size - 1) & host.used_idx; + + event->kick_index = host.used_idx; + /* Barrier C (for pairing) */ + smp_mb(); + return !(ring[head].flags & DESC_HW); +} + +void poll_avail(void) +{ + unsigned head = (ring_size - 1) & host.used_idx; + + while (!(ring[head].flags & DESC_HW)) + busy_wait(); +} + +bool use_buf(unsigned *lenp, void **bufp) +{ + unsigned head = (ring_size - 1) & host.used_idx; + + if (!(ring[head].flags & DESC_HW)) + return false; + + /* make sure length read below is not speculated */ + /* Barrier A (for pairing) */ + smp_acquire(); + + /* simple in-order completion: we don't need + * to touch index at all. This also means we + * can just modify the descriptor in-place. + */ + ring[head].len--; + /* Make sure len is valid before flags. + * Note: an alternative is to write len and flags in one access - + * possible on 64-bit architectures, but wmb is free on Intel anyway, + * so there is no easy way to test whether it is a gain. + */ + /* Barrier B (for pairing) */ + smp_release(); + ring[head].flags = 0; + host.used_idx++; + return true; +} + +void call_used(void) +{ + /* Flush in previous flags write */ + /* Barrier D (for pairing) */ + smp_mb(); + if (!need_event(event->call_index, + host.used_idx, + host.called_used_idx)) + return; + + host.called_used_idx = host.used_idx; + call(); +} diff --git a/tools/virtio/ringtest/run-on-all.sh b/tools/virtio/ringtest/run-on-all.sh new file mode 100755 index 000000000000..52b0f71ffa8d --- /dev/null +++ b/tools/virtio/ringtest/run-on-all.sh @@ -0,0 +1,24 @@ +#!/bin/sh + +# Use the last CPU for the host. Why not the first? +# Many devices tend to use cpu0 by default, so +# it tends to be busier. +HOST_AFFINITY=$(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n|tail -1) + +# Run the command on all CPUs. +for cpu in $(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n); +do + # Don't run the guest and host on the same CPU. + # It actually works OK when signalling is used. + if + (echo "$@" | grep -e "--sleep" > /dev/null) || \ + test $HOST_AFFINITY '!=' $cpu + then + echo "GUEST AFFINITY $cpu" + "$@" --host-affinity $HOST_AFFINITY --guest-affinity $cpu + fi +done +echo "NO GUEST AFFINITY" +"$@" --host-affinity $HOST_AFFINITY +echo "NO AFFINITY" +"$@" diff --git a/tools/virtio/ringtest/virtio_ring_0_9.c b/tools/virtio/ringtest/virtio_ring_0_9.c new file mode 100644 index 000000000000..47c9a1a18d36 --- /dev/null +++ b/tools/virtio/ringtest/virtio_ring_0_9.c @@ -0,0 +1,316 @@ +/* + * Copyright (C) 2016 Red Hat, Inc. + * Author: Michael S. Tsirkin <mst@redhat.com> + * This work is licensed under the terms of the GNU GPL, version 2. + * + * Partial implementation of virtio 0.9. The event index is used for + * signalling, unconditionally. The design roughly follows the Linux kernel + * implementation, so that its performance can be judged against it.
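Before the implementation, a standalone sketch of the virtio 0.9 layout that alloc_ring() below will set up; it assumes the legacy vring_init()/vring_size() helpers are exposed by the installed linux/virtio_ring.h and is illustrative only:

	#include <stdio.h>
	#include <stdlib.h>
	#include <linux/virtio_ring.h>

	int main(void)
	{
		struct vring vr;
		unsigned num = 256;	/* ring size; must be a power of two */
		void *p;

		if (posix_memalign(&p, 0x1000, vring_size(num, 0x1000)))
			return 1;
		/* One page-aligned block holds desc[], avail and used. */
		vring_init(&vr, num, p, 0x1000);
		printf("desc %p avail %p used %p, %u bytes\n",
		       (void *)vr.desc, (void *)vr.avail, (void *)vr.used,
		       vring_size(num, 0x1000));
		free(p);
		return 0;
	}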
+ */ +#define _GNU_SOURCE +#include "main.h" +#include <stdlib.h> +#include <stdio.h> +#include <assert.h> +#include <string.h> +#include <linux/virtio_ring.h> + +struct data { + void *data; +} *data; + +struct vring ring; + +/* Defining RING_POLL (done by virtio_ring_poll.c, which wraps this file) + * activates the experimental ring polling code, which skips index reads + * on the consumer side in favor of looking at the high bits of + * ring id ^ 0x8000. + */ +/* #ifdef RING_POLL */ + +/* how much padding is needed to avoid false cache sharing */ +#define HOST_GUEST_PADDING 0x80 + +struct guest { + unsigned short avail_idx; + unsigned short last_used_idx; + unsigned short num_free; + unsigned short kicked_avail_idx; + unsigned short free_head; + unsigned char reserved[HOST_GUEST_PADDING - 10]; +} guest; + +struct host { + /* we do not need to track last avail index + * unless we have more than one in flight. + */ + unsigned short used_idx; + unsigned short called_used_idx; + unsigned char reserved[HOST_GUEST_PADDING - 4]; +} host; + +/* implemented by ring */ +void alloc_ring(void) +{ + int ret; + int i; + void *p; + + ret = posix_memalign(&p, 0x1000, vring_size(ring_size, 0x1000)); + if (ret) { + perror("Unable to allocate ring buffer.\n"); + exit(3); + } + memset(p, 0, vring_size(ring_size, 0x1000)); + vring_init(&ring, ring_size, p, 0x1000); + + guest.avail_idx = 0; + guest.kicked_avail_idx = -1; + guest.last_used_idx = 0; + /* Put everything in free lists. */ + guest.free_head = 0; + for (i = 0; i < ring_size - 1; i++) + ring.desc[i].next = i + 1; + host.used_idx = 0; + host.called_used_idx = -1; + guest.num_free = ring_size; + data = malloc(ring_size * sizeof *data); + if (!data) { + perror("Unable to allocate data buffer.\n"); + exit(3); + } + memset(data, 0, ring_size * sizeof *data); +} + +/* guest side */ +int add_inbuf(unsigned len, void *buf, void *datap) +{ + unsigned head, avail; + struct vring_desc *desc; + + if (!guest.num_free) + return -1; + + head = guest.free_head; + guest.num_free--; + + desc = ring.desc; + desc[head].flags = VRING_DESC_F_NEXT; + desc[head].addr = (unsigned long)(void *)buf; + desc[head].len = len; + /* We do it like this to simulate the way + * we'd have to flip it if we had multiple + * descriptors.
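The id encoding mentioned in the RING_POLL comment above can be checked in isolation. A standalone sketch with illustrative values, not part of the patch: the low bits of each avail-ring entry carry the descriptor head, the high bits carry the lap counter from the free-running avail index, and the 0x8000 flip distinguishes a written entry from the ring's initial all-zero state:

	#include <assert.h>

	int main(void)
	{
		const unsigned ring_size = 256;	/* power of two, as main() asserts */
		unsigned short avail = 0x0102;	/* free-running index: lap 1, slot 2 */
		unsigned short head = 5;	/* descriptor id being published */

		/* Producer encoding, as in add_inbuf() under RING_POLL: */
		unsigned short entry =
			(head | (avail & ~(ring_size - 1))) ^ 0x8000;

		/* Consumer check, as in use_buf(): zero high bits mean the
		 * slot was written for this very lap.
		 */
		assert(((avail ^ entry ^ 0x8000) & ~(ring_size - 1)) == 0);
		assert((entry & (ring_size - 1)) == head);

		/* The same slot as left over from the previous lap fails: */
		unsigned short stale =
			(head | ((avail - ring_size) & ~(ring_size - 1))) ^ 0x8000;
		assert(((avail ^ stale ^ 0x8000) & ~(ring_size - 1)) != 0);
		return 0;
	}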
+ */ + desc[head].flags &= ~VRING_DESC_F_NEXT; + guest.free_head = desc[head].next; + + data[head].data = datap; + +#ifdef RING_POLL + /* Barrier A (for pairing) */ + smp_release(); + avail = guest.avail_idx++; + ring.avail->ring[avail & (ring_size - 1)] = + (head | (avail & ~(ring_size - 1))) ^ 0x8000; +#else + avail = (ring_size - 1) & (guest.avail_idx++); + ring.avail->ring[avail] = head; + /* Barrier A (for pairing) */ + smp_release(); +#endif + ring.avail->idx = guest.avail_idx; + return 0; +} + +void *get_buf(unsigned *lenp, void **bufp) +{ + unsigned head; + unsigned index; + void *datap; + +#ifdef RING_POLL + head = (ring_size - 1) & guest.last_used_idx; + index = ring.used->ring[head].id; + if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1)) + return NULL; + /* Barrier B (for pairing) */ + smp_acquire(); + index &= ring_size - 1; +#else + if (ring.used->idx == guest.last_used_idx) + return NULL; + /* Barrier B (for pairing) */ + smp_acquire(); + head = (ring_size - 1) & guest.last_used_idx; + index = ring.used->ring[head].id; +#endif + *lenp = ring.used->ring[head].len; + datap = data[index].data; + *bufp = (void*)(unsigned long)ring.desc[index].addr; + data[index].data = NULL; + ring.desc[index].next = guest.free_head; + guest.free_head = index; + guest.num_free++; + guest.last_used_idx++; + return datap; +} + +void poll_used(void) +{ +#ifdef RING_POLL + unsigned head = (ring_size - 1) & guest.last_used_idx; + + for (;;) { + unsigned index = ring.used->ring[head].id; + + if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1)) + busy_wait(); + else + break; + } +#else + unsigned head = guest.last_used_idx; + + while (ring.used->idx == head) + busy_wait(); +#endif +} + +void disable_call() +{ + /* Doing nothing to disable calls might cause + * extra interrupts, but reduces the number of cache misses. + */ +} + +bool enable_call() +{ + unsigned short last_used_idx; + + vring_used_event(&ring) = (last_used_idx = guest.last_used_idx); + /* Flush call index write */ + /* Barrier D (for pairing) */ + smp_mb(); +#ifdef RING_POLL + { + unsigned short head = last_used_idx & (ring_size - 1); + unsigned index = ring.used->ring[head].id; + + return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1); + } +#else + return ring.used->idx == last_used_idx; +#endif +} + +void kick_available(void) +{ + /* Flush in previous flags write */ + /* Barrier C (for pairing) */ + smp_mb(); + if (!vring_need_event(vring_avail_event(&ring), + guest.avail_idx, + guest.kicked_avail_idx)) + return; + + guest.kicked_avail_idx = guest.avail_idx; + kick(); +} + +/* host side */ +void disable_kick() +{ + /* Doing nothing to disable kicks might cause + * extra interrupts, but reduces the number of cache misses. 
+ */ + +bool enable_kick() +{ + unsigned head = host.used_idx; + + vring_avail_event(&ring) = head; + /* Barrier C (for pairing) */ + smp_mb(); +#ifdef RING_POLL + { + unsigned index = ring.avail->ring[head & (ring_size - 1)]; + + return (index ^ head ^ 0x8000) & ~(ring_size - 1); + } +#else + return head == ring.avail->idx; +#endif +} + +void poll_avail(void) +{ + unsigned head = host.used_idx; +#ifdef RING_POLL + for (;;) { + unsigned index = ring.avail->ring[head & (ring_size - 1)]; + if ((index ^ head ^ 0x8000) & ~(ring_size - 1)) + busy_wait(); + else + break; + } +#else + while (ring.avail->idx == head) + busy_wait(); +#endif +} + +bool use_buf(unsigned *lenp, void **bufp) +{ + unsigned used_idx = host.used_idx; + struct vring_desc *desc; + unsigned head; + +#ifdef RING_POLL + head = ring.avail->ring[used_idx & (ring_size - 1)]; + if ((used_idx ^ head ^ 0x8000) & ~(ring_size - 1)) + return false; + /* Barrier A (for pairing) */ + smp_acquire(); + + used_idx &= ring_size - 1; + desc = &ring.desc[head & (ring_size - 1)]; +#else + if (used_idx == ring.avail->idx) + return false; + + /* Barrier A (for pairing) */ + smp_acquire(); + + used_idx &= ring_size - 1; + head = ring.avail->ring[used_idx]; + desc = &ring.desc[head]; +#endif + + *lenp = desc->len; + *bufp = (void *)(unsigned long)desc->addr; + + /* now update used ring */ + ring.used->ring[used_idx].id = head; + ring.used->ring[used_idx].len = desc->len - 1; + /* Barrier B (for pairing) */ + smp_release(); + host.used_idx++; + ring.used->idx = host.used_idx; + + return true; +} + +void call_used(void) +{ + /* Flush in previous flags write */ + /* Barrier D (for pairing) */ + smp_mb(); + if (!vring_need_event(vring_used_event(&ring), + host.used_idx, + host.called_used_idx)) + return; + + host.called_used_idx = host.used_idx; + call(); +} diff --git a/tools/virtio/ringtest/virtio_ring_poll.c b/tools/virtio/ringtest/virtio_ring_poll.c new file mode 100644 index 000000000000..84fc2c557aaa --- /dev/null +++ b/tools/virtio/ringtest/virtio_ring_poll.c @@ -0,0 +1,2 @@ +#define RING_POLL 1 +#include "virtio_ring_0_9.c"
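A typical way to build and drive these benchmarks (an assumed invocation, not anything stated in the patch): run make in tools/virtio/ringtest, then

	./run-on-all.sh ./ring --sleep

run-on-all.sh pins the host thread to the last CPU and sweeps the guest thread across every CPU, then repeats once with no guest affinity and once with no affinity at all; dropping --sleep makes both sides busy-poll the ring instead of sleeping on the eventfds.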