author      2019-02-04 16:55:44 +0000
committer   2019-02-04 16:55:44 +0000
commit      76c648e7a477ffb2a882ad5ffe523269bd9a3f6a (patch)
tree        29d319d598650bab04e4f58e5e8769567e33091e /lib/libcxx/utils/google-benchmark/test
parent      Import libc++abi 7.0.1. (diff)
download    wireguard-openbsd-76c648e7a477ffb2a882ad5ffe523269bd9a3f6a.tar.xz
            wireguard-openbsd-76c648e7a477ffb2a882ad5ffe523269bd9a3f6a.zip
Import libc++ 7.0.1.
Diffstat (limited to 'lib/libcxx/utils/google-benchmark/test')
27 files changed, 2059 insertions(+), 171 deletions(-)
diff --git a/lib/libcxx/utils/google-benchmark/test/CMakeLists.txt b/lib/libcxx/utils/google-benchmark/test/CMakeLists.txt
index 14ba7a6e2da..f49ca5148f4 100644
--- a/lib/libcxx/utils/google-benchmark/test/CMakeLists.txt
+++ b/lib/libcxx/utils/google-benchmark/test/CMakeLists.txt
@@ -1,6 +1,7 @@
 # Enable the tests
 
 find_package(Threads REQUIRED)
+include(CheckCXXCompilerFlag)
 
 # NOTE: Some tests use `<cassert>` to perform the test. Therefore we must
 # strip -DNDEBUG from the default CMake flags in DEBUG mode.
@@ -21,19 +22,29 @@ if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" )
   endforeach()
 endif()
 
+check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
+set(BENCHMARK_O3_FLAG "")
+if (BENCHMARK_HAS_O3_FLAG)
+  set(BENCHMARK_O3_FLAG "-O3")
+endif()
+
 # NOTE: These flags must be added after find_package(Threads REQUIRED) otherwise
 # they will break the configuration check.
 if (DEFINED BENCHMARK_CXX_LINKER_FLAGS)
   list(APPEND CMAKE_EXE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
 endif()
 
-add_library(output_test_helper STATIC output_test_helper.cc)
+add_library(output_test_helper STATIC output_test_helper.cc output_test.h)
 
 macro(compile_benchmark_test name)
   add_executable(${name} "${name}.cc")
   target_link_libraries(${name} benchmark ${CMAKE_THREAD_LIBS_INIT})
 endmacro(compile_benchmark_test)
 
+macro(compile_benchmark_test_with_main name)
+  add_executable(${name} "${name}.cc")
+  target_link_libraries(${name} benchmark_main)
+endmacro(compile_benchmark_test_with_main)
+
 macro(compile_output_test name)
   add_executable(${name} "${name}.cc" output_test.h)
@@ -41,7 +52,6 @@ macro(compile_output_test name)
                         ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
 endmacro(compile_output_test)
 
-
 # Demonstration executable
 compile_benchmark_test(benchmark_test)
 add_test(benchmark benchmark_test --benchmark_min_time=0.01)
@@ -53,14 +63,23 @@ macro(add_filter_test name filter expect)
 endmacro(add_filter_test)
 
 add_filter_test(filter_simple "Foo" 3)
+add_filter_test(filter_simple_negative "-Foo" 2)
 add_filter_test(filter_suffix "BM_.*" 4)
+add_filter_test(filter_suffix_negative "-BM_.*" 1)
 add_filter_test(filter_regex_all ".*" 5)
+add_filter_test(filter_regex_all_negative "-.*" 0)
 add_filter_test(filter_regex_blank "" 5)
+add_filter_test(filter_regex_blank_negative "-" 0)
 add_filter_test(filter_regex_none "monkey" 0)
+add_filter_test(filter_regex_none_negative "-monkey" 5)
 add_filter_test(filter_regex_wildcard ".*Foo.*" 3)
+add_filter_test(filter_regex_wildcard_negative "-.*Foo.*" 2)
 add_filter_test(filter_regex_begin "^BM_.*" 4)
+add_filter_test(filter_regex_begin_negative "-^BM_.*" 1)
 add_filter_test(filter_regex_begin2 "^N" 1)
+add_filter_test(filter_regex_begin2_negative "-^N" 4)
 add_filter_test(filter_regex_end ".*Ba$" 1)
+add_filter_test(filter_regex_end_negative "-.*Ba$" 4)
 
 compile_benchmark_test(options_test)
 add_test(options_benchmarks options_test --benchmark_min_time=0.01)
@@ -75,6 +94,11 @@ compile_benchmark_test(skip_with_error_test)
 add_test(skip_with_error_test skip_with_error_test --benchmark_min_time=0.01)
 
 compile_benchmark_test(donotoptimize_test)
+# Some of the issues with DoNotOptimize only occur when optimization is enabled
+check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
+if (BENCHMARK_HAS_O3_FLAG)
+  set_target_properties(donotoptimize_test PROPERTIES COMPILE_FLAGS "-O3")
+endif()
 add_test(donotoptimize_test donotoptimize_test --benchmark_min_time=0.01)
 
 compile_benchmark_test(fixture_test)
@@ -89,18 +113,37 @@ add_test(map_test map_test --benchmark_min_time=0.01)
 compile_benchmark_test(multiple_ranges_test)
 add_test(multiple_ranges_test multiple_ranges_test --benchmark_min_time=0.01)
 
+compile_benchmark_test_with_main(link_main_test)
+add_test(link_main_test link_main_test --benchmark_min_time=0.01)
+
 compile_output_test(reporter_output_test)
 add_test(reporter_output_test reporter_output_test --benchmark_min_time=0.01)
 
+compile_output_test(templated_fixture_test)
+add_test(templated_fixture_test templated_fixture_test --benchmark_min_time=0.01)
+
+compile_output_test(user_counters_test)
+add_test(user_counters_test user_counters_test --benchmark_min_time=0.01)
+
+compile_output_test(user_counters_tabular_test)
+add_test(user_counters_tabular_test user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01)
+
 check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG)
 if (BENCHMARK_HAS_CXX03_FLAG)
-  set(CXX03_FLAGS "${CMAKE_CXX_FLAGS}")
-  string(REPLACE "-std=c++11" "-std=c++03" CXX03_FLAGS "${CXX03_FLAGS}")
-  string(REPLACE "-std=c++0x" "-std=c++03" CXX03_FLAGS "${CXX03_FLAGS}")
-
   compile_benchmark_test(cxx03_test)
   set_target_properties(cxx03_test
-      PROPERTIES COMPILE_FLAGS "${CXX03_FLAGS}")
+      PROPERTIES
+      COMPILE_FLAGS "-std=c++03")
+  # libstdc++ provides different definitions within <map> between dialects. When
+  # LTO is enabled and -Werror is specified GCC diagnoses this ODR violation
+  # causing the test to fail to compile. To prevent this we explicitly disable
+  # the warning.
+  check_cxx_compiler_flag(-Wno-odr BENCHMARK_HAS_WNO_ODR)
+  if (BENCHMARK_ENABLE_LTO AND BENCHMARK_HAS_WNO_ODR)
+    set_target_properties(cxx03_test
+        PROPERTIES
+        LINK_FLAGS "-Wno-odr")
+  endif()
   add_test(cxx03 cxx03_test --benchmark_min_time=0.01)
 endif()
 
@@ -113,6 +156,53 @@ endif()
 compile_output_test(complexity_test)
 add_test(complexity_benchmark complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME})
 
+###############################################################################
+# GoogleTest Unit Tests
+###############################################################################
+
+if (BENCHMARK_ENABLE_GTEST_TESTS)
+  macro(compile_gtest name)
+    add_executable(${name} "${name}.cc")
+    if (TARGET googletest)
+      add_dependencies(${name} googletest)
+    endif()
+    if (GTEST_INCLUDE_DIRS)
+      target_include_directories(${name} PRIVATE ${GTEST_INCLUDE_DIRS})
+    endif()
+    target_link_libraries(${name} benchmark
+      ${GTEST_BOTH_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
+  endmacro(compile_gtest)
+
+  macro(add_gtest name)
+    compile_gtest(${name})
+    add_test(${name} ${name})
+  endmacro()
+
+  add_gtest(benchmark_gtest)
+  add_gtest(statistics_gtest)
+  add_gtest(string_util_gtest)
+endif(BENCHMARK_ENABLE_GTEST_TESTS)
+
+###############################################################################
+# Assembly Unit Tests
+###############################################################################
+
+if (BENCHMARK_ENABLE_ASSEMBLY_TESTS)
+  if (NOT LLVM_FILECHECK_EXE)
+    message(FATAL_ERROR "LLVM FileCheck is required when including this file")
+  endif()
+  include(AssemblyTests.cmake)
+  add_filecheck_test(donotoptimize_assembly_test)
+  add_filecheck_test(state_assembly_test)
+  add_filecheck_test(clobber_memory_assembly_test)
+endif()
+
+
+
+###############################################################################
+# Code Coverage Configuration
+###############################################################################
+
 # Add the coverage command(s)
 if(CMAKE_BUILD_TYPE)
   string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
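The new compile_benchmark_test_with_main macro links a test against the benchmark_main library, which supplies main() for the binary. A minimal sketch of such a translation unit (illustrative only, not part of the imported sources; link_main_test.cc further down is the real in-tree example):

#include "benchmark/benchmark.h"

static void BM_noop(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_noop);
// Note: no BENCHMARK_MAIN() here; main() comes from the linked
// benchmark_main library.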
diff --git a/lib/libcxx/utils/google-benchmark/test/basic_test.cc b/lib/libcxx/utils/google-benchmark/test/basic_test.cc
index 22de007cb6d..d07fbc00b15 100644
--- a/lib/libcxx/utils/google-benchmark/test/basic_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/basic_test.cc
@@ -1,10 +1,10 @@
-#include "benchmark/benchmark_api.h"
+#include "benchmark/benchmark.h"
 
 #define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)
 
 void BM_empty(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     benchmark::DoNotOptimize(state.iterations());
   }
 }
@@ -12,7 +12,7 @@ BENCHMARK(BM_empty);
 BENCHMARK(BM_empty)->ThreadPerCpu();
 
 void BM_spin_empty(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     for (int x = 0; x < state.range(0); ++x) {
       benchmark::DoNotOptimize(x);
     }
@@ -25,7 +25,7 @@ void BM_spin_pause_before(benchmark::State& state) {
   for (int i = 0; i < state.range(0); ++i) {
     benchmark::DoNotOptimize(i);
   }
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     for (int i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
@@ -35,7 +35,7 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_before);
 BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();
 
 void BM_spin_pause_during(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     state.PauseTiming();
     for (int i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
@@ -50,7 +50,7 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_during);
 BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();
 
 void BM_pause_during(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     state.PauseTiming();
     state.ResumeTiming();
   }
@@ -61,7 +61,7 @@ BENCHMARK(BM_pause_during)->UseRealTime();
 BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();
 
 void BM_spin_pause_after(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     for (int i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
@@ -77,7 +77,7 @@ void BM_spin_pause_before_and_after(benchmark::State& state) {
   for (int i = 0; i < state.range(0); ++i) {
     benchmark::DoNotOptimize(i);
   }
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     for (int i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
@@ -90,10 +90,47 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
 BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();
 
 void BM_empty_stop_start(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_empty_stop_start);
 BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();
 
-BENCHMARK_MAIN()
+
+void BM_KeepRunning(benchmark::State& state) {
+  size_t iter_count = 0;
+  assert(iter_count == state.iterations());
+  while (state.KeepRunning()) {
+    ++iter_count;
+  }
+  assert(iter_count == state.iterations());
+}
+BENCHMARK(BM_KeepRunning);
+
+void BM_KeepRunningBatch(benchmark::State& state) {
+  // Choose a prime batch size to avoid evenly dividing max_iterations.
+  const size_t batch_size = 101;
+  size_t iter_count = 0;
+  while (state.KeepRunningBatch(batch_size)) {
+    iter_count += batch_size;
+  }
+  assert(state.iterations() == iter_count);
+}
+BENCHMARK(BM_KeepRunningBatch);
+
+void BM_RangedFor(benchmark::State& state) {
+  size_t iter_count = 0;
+  for (auto _ : state) {
+    ++iter_count;
+  }
+  assert(iter_count == state.max_iterations);
+}
+BENCHMARK(BM_RangedFor);
+
+// Ensure that StateIterator provides all the necessary typedefs required to
+// instantiate std::iterator_traits.
+static_assert(std::is_same<
+  typename std::iterator_traits<benchmark::State::StateIterator>::value_type,
+  typename benchmark::State::StateIterator::value_type>::value, "");
+
+BENCHMARK_MAIN();
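The three new tests at the bottom of basic_test.cc pin down the contract shared by the iteration APIs: however the loop is written, state.iterations() reports the number of iterations actually claimed. A condensed sketch (not from the imported sources) of the batched variant, which claims n iterations per call and amortizes per-call overhead:

#include <cassert>
#include "benchmark/benchmark.h"

static void BM_Batched(benchmark::State& state) {
  size_t claimed = 0;
  // KeepRunningBatch(10) accounts for 10 iterations at a time, so the
  // loop body runs once per batch rather than once per iteration.
  while (state.KeepRunningBatch(10)) {
    claimed += 10;
  }
  assert(claimed == state.iterations());
}
BENCHMARK(BM_Batched);
BENCHMARK_MAIN();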
diff --git a/lib/libcxx/utils/google-benchmark/test/benchmark_gtest.cc b/lib/libcxx/utils/google-benchmark/test/benchmark_gtest.cc
new file mode 100644
index 00000000000..10683b433ab
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/test/benchmark_gtest.cc
@@ -0,0 +1,33 @@
+#include <vector>
+
+#include "../src/benchmark_register.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+TEST(AddRangeTest, Simple) {
+  std::vector<int> dst;
+  AddRange(&dst, 1, 2, 2);
+  EXPECT_THAT(dst, testing::ElementsAre(1, 2));
+}
+
+TEST(AddRangeTest, Simple64) {
+  std::vector<int64_t> dst;
+  AddRange(&dst, static_cast<int64_t>(1), static_cast<int64_t>(2), 2);
+  EXPECT_THAT(dst, testing::ElementsAre(1, 2));
+}
+
+TEST(AddRangeTest, Advanced) {
+  std::vector<int> dst;
+  AddRange(&dst, 5, 15, 2);
+  EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
+}
+
+TEST(AddRangeTest, Advanced64) {
+  std::vector<int64_t> dst;
+  AddRange(&dst, static_cast<int64_t>(5), static_cast<int64_t>(15), 2);
+  EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
+}
+
+}  // end namespace
diff --git a/lib/libcxx/utils/google-benchmark/test/benchmark_test.cc b/lib/libcxx/utils/google-benchmark/test/benchmark_test.cc
index 57731331e6d..3cd4f5565fa 100644
--- a/lib/libcxx/utils/google-benchmark/test/benchmark_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/benchmark_test.cc
@@ -40,9 +40,9 @@ double CalculatePi(int depth) {
   return (pi - 1.0) * 4;
 }
 
-std::set<int> ConstructRandomSet(int size) {
-  std::set<int> s;
-  for (int i = 0; i < size; ++i) s.insert(i);
+std::set<int64_t> ConstructRandomSet(int64_t size) {
+  std::set<int64_t> s;
+  for (int i = 0; i < size; ++i) s.insert(s.end(), i);
   return s;
 }
 
@@ -53,7 +53,7 @@ std::vector<int>* test_vector = nullptr;
 
 static void BM_Factorial(benchmark::State& state) {
   int fac_42 = 0;
-  while (state.KeepRunning()) fac_42 = Factorial(8);
+  for (auto _ : state) fac_42 = Factorial(8);
   // Prevent compiler optimizations
   std::stringstream ss;
   ss << fac_42;
@@ -64,7 +64,7 @@ BENCHMARK(BM_Factorial)->UseRealTime();
 
 static void BM_CalculatePiRange(benchmark::State& state) {
   double pi = 0.0;
-  while (state.KeepRunning()) pi = CalculatePi(state.range(0));
+  for (auto _ : state) pi = CalculatePi(static_cast<int>(state.range(0)));
   std::stringstream ss;
   ss << pi;
   state.SetLabel(ss.str());
@@ -73,8 +73,8 @@ BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024);
 
 static void BM_CalculatePi(benchmark::State& state) {
   static const int depth = 1024;
-  while (state.KeepRunning()) {
-    benchmark::DoNotOptimize(CalculatePi(depth));
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(CalculatePi(static_cast<int>(depth)));
   }
 }
 BENCHMARK(BM_CalculatePi)->Threads(8);
@@ -82,26 +82,30 @@ BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32);
 BENCHMARK(BM_CalculatePi)->ThreadPerCpu();
 
 static void BM_SetInsert(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  std::set<int64_t> data;
+  for (auto _ : state) {
     state.PauseTiming();
-    std::set<int> data = ConstructRandomSet(state.range(0));
+    data = ConstructRandomSet(state.range(0));
     state.ResumeTiming();
     for (int j = 0; j < state.range(1); ++j) data.insert(rand());
   }
   state.SetItemsProcessed(state.iterations() * state.range(1));
   state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int));
 }
-BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {1, 10}});
+
+// Test many inserts at once to reduce the total iterations needed. Otherwise, the slower,
+// non-timed part of each iteration will make the benchmark take forever.
+BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {128, 512}});
 
 template <typename Container,
           typename ValueType = typename Container::value_type>
 static void BM_Sequential(benchmark::State& state) {
   ValueType v = 42;
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     Container c;
-    for (int i = state.range(0); --i;) c.push_back(v);
+    for (int64_t i = state.range(0); --i;) c.push_back(v);
   }
-  const size_t items_processed = state.iterations() * state.range(0);
+  const int64_t items_processed = state.iterations() * state.range(0);
   state.SetItemsProcessed(items_processed);
   state.SetBytesProcessed(items_processed * sizeof(v));
 }
@@ -109,14 +113,15 @@ BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int)
     ->Range(1 << 0, 1 << 10);
 BENCHMARK_TEMPLATE(BM_Sequential, std::list<int>)->Range(1 << 0, 1 << 10);
 // Test the variadic version of BENCHMARK_TEMPLATE in C++11 and beyond.
-#if __cplusplus >= 201103L
+#ifdef BENCHMARK_HAS_CXX11
 BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512);
 #endif
 
 static void BM_StringCompare(benchmark::State& state) {
-  std::string s1(state.range(0), '-');
-  std::string s2(state.range(0), '-');
-  while (state.KeepRunning()) benchmark::DoNotOptimize(s1.compare(s2));
+  size_t len = static_cast<size_t>(state.range(0));
+  std::string s1(len, '-');
+  std::string s2(len, '-');
+  for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2));
 }
 BENCHMARK(BM_StringCompare)->Range(1, 1 << 20);
 
@@ -126,7 +131,7 @@ static void BM_SetupTeardown(benchmark::State& state) {
     test_vector = new std::vector<int>();
   }
   int i = 0;
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     std::lock_guard<std::mutex> l(test_vector_mu);
     if (i % 2 == 0)
       test_vector->push_back(i);
@@ -142,7 +147,7 @@ BENCHMARK(BM_SetupTeardown)->ThreadPerCpu();
 
 static void BM_LongTest(benchmark::State& state) {
   double tracker = 0.0;
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     for (int i = 0; i < state.range(0); ++i)
       benchmark::DoNotOptimize(tracker += i);
   }
@@ -150,16 +155,16 @@ static void BM_LongTest(benchmark::State& state) {
 BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28);
 
 static void BM_ParallelMemset(benchmark::State& state) {
-  int size = state.range(0) / static_cast<int>(sizeof(int));
-  int thread_size = size / state.threads;
+  int64_t size = state.range(0) / static_cast<int64_t>(sizeof(int));
+  int thread_size = static_cast<int>(size) / state.threads;
   int from = thread_size * state.thread_index;
   int to = from + thread_size;
 
   if (state.thread_index == 0) {
-    test_vector = new std::vector<int>(size);
+    test_vector = new std::vector<int>(static_cast<size_t>(size));
   }
 
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     for (int i = from; i < to; i++) {
       // No need to lock test_vector_mu as ranges
       // do not overlap between threads.
@@ -174,12 +179,12 @@ static void BM_ParallelMemset(benchmark::State& state) {
 BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4);
 
 static void BM_ManualTiming(benchmark::State& state) {
-  size_t slept_for = 0;
-  int microseconds = state.range(0);
+  int64_t slept_for = 0;
+  int64_t microseconds = state.range(0);
   std::chrono::duration<double, std::micro> sleep_duration{
       static_cast<double>(microseconds)};
 
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     auto start = std::chrono::high_resolution_clock::now();
     // Simulate some useful workload with a sleep
     std::this_thread::sleep_for(
@@ -197,11 +202,11 @@ static void BM_ManualTiming(benchmark::State& state) {
 BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseRealTime();
 BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseManualTime();
 
-#if __cplusplus >= 201103L
+#ifdef BENCHMARK_HAS_CXX11
 
 template <class... Args>
 void BM_with_args(benchmark::State& state, Args&&...) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK_CAPTURE(BM_with_args, int_test, 42, 43, 44);
@@ -213,24 +218,7 @@ void BM_non_template_args(benchmark::State& state, int, double) {
 }
 BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0);
 
-static void BM_UserCounter(benchmark::State& state) {
-  static const int depth = 1024;
-  while (state.KeepRunning()) {
-    benchmark::DoNotOptimize(CalculatePi(depth));
-  }
-  state.counters["Foo"] = 1;
-  state.counters["Bar"] = 2;
-  state.counters["Baz"] = 3;
-  state.counters["Bat"] = 5;
-#ifdef BENCHMARK_HAS_CXX11
-  state.counters.insert({{"Foo", 2}, {"Bar", 3}, {"Baz", 5}, {"Bat", 6}});
-#endif
-}
-BENCHMARK(BM_UserCounter)->Threads(8);
-BENCHMARK(BM_UserCounter)->ThreadRange(1, 32);
-BENCHMARK(BM_UserCounter)->ThreadPerCpu();
-
-#endif  // __cplusplus >= 201103L
+#endif  // BENCHMARK_HAS_CXX11
 
 static void BM_DenseThreadRanges(benchmark::State& st) {
   switch (st.range(0)) {
@@ -254,4 +242,4 @@ BENCHMARK(BM_DenseThreadRanges)->Arg(1)->DenseThreadRange(1, 3);
 BENCHMARK(BM_DenseThreadRanges)->Arg(2)->DenseThreadRange(1, 4, 2);
 BENCHMARK(BM_DenseThreadRanges)->Arg(3)->DenseThreadRange(5, 14, 3);
 
-BENCHMARK_MAIN()
+BENCHMARK_MAIN();
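Most of the churn in benchmark_test.cc comes from state.range(n) now returning int64_t rather than int, so call sites that feed ranges into narrower APIs cast explicitly. A sketch of the resulting pattern (not from the imported sources):

#include <string>
#include "benchmark/benchmark.h"

static void BM_StringCreate(benchmark::State& state) {
  // state.range(0) is int64_t; std::string's constructor wants size_t.
  const size_t len = static_cast<size_t>(state.range(0));
  for (auto _ : state) {
    std::string s(len, '-');
    benchmark::DoNotOptimize(s.data());  // keep the allocation observable
  }
}
BENCHMARK(BM_StringCreate)->Range(1, 1 << 16);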
diff --git a/lib/libcxx/utils/google-benchmark/test/clobber_memory_assembly_test.cc b/lib/libcxx/utils/google-benchmark/test/clobber_memory_assembly_test.cc
new file mode 100644
index 00000000000..f41911a39ce
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/test/clobber_memory_assembly_test.cc
@@ -0,0 +1,64 @@
+#include <benchmark/benchmark.h>
+
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wreturn-type"
+#endif
+
+extern "C" {
+
+extern int ExternInt;
+extern int ExternInt2;
+extern int ExternInt3;
+
+}
+
+// CHECK-LABEL: test_basic:
+extern "C" void test_basic() {
+  int x;
+  benchmark::DoNotOptimize(&x);
+  x = 101;
+  benchmark::ClobberMemory();
+  // CHECK: leaq [[DEST:[^,]+]], %rax
+  // CHECK: movl $101, [[DEST]]
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_redundant_store:
+extern "C" void test_redundant_store() {
+  ExternInt = 3;
+  benchmark::ClobberMemory();
+  ExternInt = 51;
+  // CHECK-DAG: ExternInt
+  // CHECK-DAG: movl $3
+  // CHECK: movl $51
+}
+
+// CHECK-LABEL: test_redundant_read:
+extern "C" void test_redundant_read() {
+  int x;
+  benchmark::DoNotOptimize(&x);
+  x = ExternInt;
+  benchmark::ClobberMemory();
+  x = ExternInt2;
+  // CHECK: leaq [[DEST:[^,]+]], %rax
+  // CHECK: ExternInt(%rip)
+  // CHECK: movl %eax, [[DEST]]
+  // CHECK-NOT: ExternInt2
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_redundant_read2:
+extern "C" void test_redundant_read2() {
+  int x;
+  benchmark::DoNotOptimize(&x);
+  x = ExternInt;
+  benchmark::ClobberMemory();
+  x = ExternInt2;
+  benchmark::ClobberMemory();
+  // CHECK: leaq [[DEST:[^,]+]], %rax
+  // CHECK: ExternInt(%rip)
+  // CHECK: movl %eax, [[DEST]]
+  // CHECK: ExternInt2(%rip)
+  // CHECK: movl %eax, [[DEST]]
+  // CHECK: ret
+}
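The FileCheck assertions above pin down the two escape hatches the library offers the optimizer: DoNotOptimize() forces a value (or pointer) to be materialized, and ClobberMemory() forces pending stores to actually reach memory. Their canonical pairing inside a real benchmark, as a sketch (not from the imported sources):

#include <vector>
#include "benchmark/benchmark.h"

static void BM_VectorPushBack(benchmark::State& state) {
  for (auto _ : state) {
    std::vector<int> v;
    v.reserve(1);
    benchmark::DoNotOptimize(v.data());  // escape the buffer pointer
    v.push_back(42);
    benchmark::ClobberMemory();  // the store to the vector cannot be elided
  }
}
BENCHMARK(BM_VectorPushBack);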
diff --git a/lib/libcxx/utils/google-benchmark/test/complexity_test.cc b/lib/libcxx/utils/google-benchmark/test/complexity_test.cc
index 14e03b06eb1..5f91660898b 100644
--- a/lib/libcxx/utils/google-benchmark/test/complexity_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/complexity_test.cc
@@ -25,8 +25,8 @@ int AddComplexityTest(std::string big_o_test_name, std::string rms_test_name,
                        {"^%bigo_name", MR_Not},  // Assert we we didn't only matched a name.
                        {"^%rms_name %rms %rms[ ]*$", MR_Next}});
   AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"},
-                        {"\"cpu_coefficient\": [0-9]+,$", MR_Next},
-                        {"\"real_coefficient\": [0-9]{1,5},$", MR_Next},
+                        {"\"cpu_coefficient\": %float,$", MR_Next},
+                        {"\"real_coefficient\": %float,$", MR_Next},
                         {"\"big_o\": \"%bigo\",$", MR_Next},
                         {"\"time_unit\": \"ns\"$", MR_Next},
                         {"}", MR_Next},
@@ -46,7 +46,7 @@ int AddComplexityTest(std::string big_o_test_name, std::string rms_test_name,
 // ========================================================================= //
 
 void BM_Complexity_O1(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     for (int i = 0; i < 1024; ++i) {
       benchmark::DoNotOptimize(&i);
     }
@@ -55,7 +55,7 @@ void BM_Complexity_O1(benchmark::State& state) {
 }
 BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
 BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity();
-BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity([](int) {
+BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity([](int64_t) {
   return 1.0;
 });
 
@@ -81,20 +81,20 @@ ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, lambda_big_o_1);
 // --------------------------- Testing BigO O(N) --------------------------- //
 // ========================================================================= //
 
-std::vector<int> ConstructRandomVector(int size) {
+std::vector<int> ConstructRandomVector(int64_t size) {
   std::vector<int> v;
-  v.reserve(size);
+  v.reserve(static_cast<int>(size));
   for (int i = 0; i < size; ++i) {
-    v.push_back(std::rand() % size);
+    v.push_back(static_cast<int>(std::rand() % size));
   }
   return v;
 }
 
 void BM_Complexity_O_N(benchmark::State& state) {
   auto v = ConstructRandomVector(state.range(0));
-  const int item_not_in_vector =
-      state.range(0) * 2;  // Test worst case scenario (item not in vector)
-  while (state.KeepRunning()) {
+  // Test worst case scenario (item not in vector)
+  const int64_t item_not_in_vector = state.range(0) * 2;
+  for (auto _ : state) {
     benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
   }
   state.SetComplexityN(state.range(0));
@@ -106,7 +106,7 @@ BENCHMARK(BM_Complexity_O_N)
 BENCHMARK(BM_Complexity_O_N)
     ->RangeMultiplier(2)
     ->Range(1 << 10, 1 << 16)
-    ->Complexity([](int n) -> double { return n; });
+    ->Complexity([](int64_t n) -> double { return static_cast<double>(n); });
 BENCHMARK(BM_Complexity_O_N)
     ->RangeMultiplier(2)
     ->Range(1 << 10, 1 << 16)
@@ -129,11 +129,12 @@ ADD_COMPLEXITY_CASES(big_o_n_test_name, rms_o_n_test_name, lambda_big_o_n);
 
 static void BM_Complexity_O_N_log_N(benchmark::State& state) {
   auto v = ConstructRandomVector(state.range(0));
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     std::sort(v.begin(), v.end());
   }
   state.SetComplexityN(state.range(0));
 }
+static const double kLog2E = 1.44269504088896340736;
 BENCHMARK(BM_Complexity_O_N_log_N)
     ->RangeMultiplier(2)
     ->Range(1 << 10, 1 << 16)
@@ -141,7 +142,7 @@ BENCHMARK(BM_Complexity_O_N_log_N)
 BENCHMARK(BM_Complexity_O_N_log_N)
     ->RangeMultiplier(2)
     ->Range(1 << 10, 1 << 16)
-    ->Complexity([](int n) { return n * std::log2(n); });
+    ->Complexity([](int64_t n) { return kLog2E * n * log(static_cast<double>(n)); });
 BENCHMARK(BM_Complexity_O_N_log_N)
     ->RangeMultiplier(2)
     ->Range(1 << 10, 1 << 16)
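complexity_test.cc drives the asymptotic-fit machinery: a benchmark reports its problem size with SetComplexityN(), and Complexity() either names a curve, infers one, or takes a lambda (whose parameter is now int64_t). A sketch of the user-facing pattern (not from the imported sources):

#include <algorithm>
#include <cstdlib>
#include <vector>
#include "benchmark/benchmark.h"

static void BM_SortRandom(benchmark::State& state) {
  std::vector<int> v(static_cast<size_t>(state.range(0)));
  for (auto _ : state) {
    state.PauseTiming();
    std::generate(v.begin(), v.end(), std::rand);  // untimed refill
    state.ResumeTiming();
    std::sort(v.begin(), v.end());
  }
  state.SetComplexityN(state.range(0));  // the N used for the curve fit
}
BENCHMARK(BM_SortRandom)->Range(1 << 10, 1 << 16)->Complexity(benchmark::oNLogN);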
diff --git a/lib/libcxx/utils/google-benchmark/test/cxx03_test.cc b/lib/libcxx/utils/google-benchmark/test/cxx03_test.cc
index a79d964e17b..baa9ed9262b 100644
--- a/lib/libcxx/utils/google-benchmark/test/cxx03_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/cxx03_test.cc
@@ -8,6 +8,10 @@
 #error C++11 or greater detected. Should be C++03.
 #endif
 
+#ifdef BENCHMARK_HAS_CXX11
+#error C++11 or greater detected by the library. BENCHMARK_HAS_CXX11 is defined.
+#endif
+
 void BM_empty(benchmark::State& state) {
   while (state.KeepRunning()) {
     volatile std::size_t x = state.iterations();
@@ -39,10 +43,21 @@ void BM_template1(benchmark::State& state) {
 BENCHMARK_TEMPLATE(BM_template1, long);
 BENCHMARK_TEMPLATE1(BM_template1, int);
 
+template <class T>
+struct BM_Fixture : public ::benchmark::Fixture {
+};
+
+BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State& state) {
+  BM_empty(state);
+}
+BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State& state) {
+  BM_empty(state);
+}
+
 void BM_counters(benchmark::State& state) {
   BM_empty(state);
   state.counters["Foo"] = 2;
 }
 BENCHMARK(BM_counters);
 
-BENCHMARK_MAIN()
+BENCHMARK_MAIN();
diff --git a/lib/libcxx/utils/google-benchmark/test/diagnostics_test.cc b/lib/libcxx/utils/google-benchmark/test/diagnostics_test.cc
index 1046730b0fc..dd64a336553 100644
--- a/lib/libcxx/utils/google-benchmark/test/diagnostics_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/diagnostics_test.cc
@@ -11,7 +11,7 @@
 #include <stdexcept>
 
 #include "../src/check.h"
-#include "benchmark/benchmark_api.h"
+#include "benchmark/benchmark.h"
 
 #if defined(__GNUC__) && !defined(__EXCEPTIONS)
 #define TEST_HAS_NO_EXCEPTIONS
@@ -47,7 +47,7 @@ void BM_diagnostic_test(benchmark::State& state) {
 
   if (called_once == false) try_invalid_pause_resume(state);
 
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     benchmark::DoNotOptimize(state.iterations());
   }
 
@@ -57,6 +57,22 @@ void BM_diagnostic_test(benchmark::State& state) {
 }
 BENCHMARK(BM_diagnostic_test);
 
+
+void BM_diagnostic_test_keep_running(benchmark::State& state) {
+  static bool called_once = false;
+
+  if (called_once == false) try_invalid_pause_resume(state);
+
+  while(state.KeepRunning()) {
+    benchmark::DoNotOptimize(state.iterations());
+  }
+
+  if (called_once == false) try_invalid_pause_resume(state);
+
+  called_once = true;
+}
+BENCHMARK(BM_diagnostic_test_keep_running);
+
 int main(int argc, char* argv[]) {
   benchmark::internal::GetAbortHandler() = &TestHandler;
   benchmark::Initialize(&argc, argv);
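diagnostics_test.cc exists because PauseTiming() and ResumeTiming() are only legal while the benchmark is actually running; calling them outside the iteration loop trips the library's internal CHECKs, which is what try_invalid_pause_resume provokes and TestHandler intercepts. The legal placement, as a sketch (not from the imported sources):

#include <vector>
#include "benchmark/benchmark.h"

static void BM_PerIterationSetup(benchmark::State& state) {
  // state.PauseTiming() here would abort: the timer is not running yet.
  for (auto _ : state) {
    state.PauseTiming();              // legal: inside the running loop
    std::vector<int> input(1024, 1);  // untimed per-iteration setup
    state.ResumeTiming();
    benchmark::DoNotOptimize(input.data());
  }
  // state.ResumeTiming() here would abort: the run has finished.
}
BENCHMARK(BM_PerIterationSetup);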
diff --git a/lib/libcxx/utils/google-benchmark/test/donotoptimize_assembly_test.cc b/lib/libcxx/utils/google-benchmark/test/donotoptimize_assembly_test.cc
new file mode 100644
index 00000000000..d4b0bab70e7
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/test/donotoptimize_assembly_test.cc
@@ -0,0 +1,163 @@
+#include <benchmark/benchmark.h>
+
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wreturn-type"
+#endif
+
+extern "C" {
+
+extern int ExternInt;
+extern int ExternInt2;
+extern int ExternInt3;
+
+inline int Add42(int x) { return x + 42; }
+
+struct NotTriviallyCopyable {
+  NotTriviallyCopyable();
+  explicit NotTriviallyCopyable(int x) : value(x) {}
+  NotTriviallyCopyable(NotTriviallyCopyable const&);
+  int value;
+};
+
+struct Large {
+  int value;
+  int data[2];
+};
+
+}
+// CHECK-LABEL: test_with_rvalue:
+extern "C" void test_with_rvalue() {
+  benchmark::DoNotOptimize(Add42(0));
+  // CHECK: movl $42, %eax
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_large_rvalue:
+extern "C" void test_with_large_rvalue() {
+  benchmark::DoNotOptimize(Large{ExternInt, {ExternInt, ExternInt}});
+  // CHECK: ExternInt(%rip)
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_non_trivial_rvalue:
+extern "C" void test_with_non_trivial_rvalue() {
+  benchmark::DoNotOptimize(NotTriviallyCopyable(ExternInt));
+  // CHECK: mov{{l|q}} ExternInt(%rip)
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_lvalue:
+extern "C" void test_with_lvalue() {
+  int x = 101;
+  benchmark::DoNotOptimize(x);
+  // CHECK-GNU: movl $101, %eax
+  // CHECK-CLANG: movl $101, -{{[0-9]+}}(%[[REG:[a-z]+]])
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_large_lvalue:
+extern "C" void test_with_large_lvalue() {
+  Large L{ExternInt, {ExternInt, ExternInt}};
+  benchmark::DoNotOptimize(L);
+  // CHECK: ExternInt(%rip)
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_non_trivial_lvalue:
+extern "C" void test_with_non_trivial_lvalue() {
+  NotTriviallyCopyable NTC(ExternInt);
+  benchmark::DoNotOptimize(NTC);
+  // CHECK: ExternInt(%rip)
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_const_lvalue:
+extern "C" void test_with_const_lvalue() {
+  const int x = 123;
+  benchmark::DoNotOptimize(x);
+  // CHECK: movl $123, %eax
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_large_const_lvalue:
+extern "C" void test_with_large_const_lvalue() {
+  const Large L{ExternInt, {ExternInt, ExternInt}};
+  benchmark::DoNotOptimize(L);
+  // CHECK: ExternInt(%rip)
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
+  // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_with_non_trivial_const_lvalue:
+extern "C" void test_with_non_trivial_const_lvalue() {
+  const NotTriviallyCopyable Obj(ExternInt);
+  benchmark::DoNotOptimize(Obj);
+  // CHECK: mov{{q|l}} ExternInt(%rip)
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_div_by_two:
+extern "C" int test_div_by_two(int input) {
+  int divisor = 2;
+  benchmark::DoNotOptimize(divisor);
+  return input / divisor;
+  // CHECK: movl $2, [[DEST:.*]]
+  // CHECK: idivl [[DEST]]
+  // CHECK: ret
+}
+
+// CHECK-LABEL: test_inc_integer:
+extern "C" int test_inc_integer() {
+  int x = 0;
+  for (int i=0; i < 5; ++i)
+    benchmark::DoNotOptimize(++x);
+  // CHECK: movl $1, [[DEST:.*]]
+  // CHECK: {{(addl \$1,|incl)}} [[DEST]]
+  // CHECK: {{(addl \$1,|incl)}} [[DEST]]
+  // CHECK: {{(addl \$1,|incl)}} [[DEST]]
+  // CHECK: {{(addl \$1,|incl)}} [[DEST]]
+  // CHECK-CLANG: movl [[DEST]], %eax
+  // CHECK: ret
+  return x;
+}
+
+// CHECK-LABEL: test_pointer_rvalue
+extern "C" void test_pointer_rvalue() {
+  // CHECK: movl $42, [[DEST:.*]]
+  // CHECK: leaq [[DEST]], %rax
+  // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
+  // CHECK: ret
+  int x = 42;
+  benchmark::DoNotOptimize(&x);
+}
+
+// CHECK-LABEL: test_pointer_const_lvalue:
+extern "C" void test_pointer_const_lvalue() {
+  // CHECK: movl $42, [[DEST:.*]]
+  // CHECK: leaq [[DEST]], %rax
+  // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
+  // CHECK: ret
+  int x = 42;
+  int * const xp = &x;
+  benchmark::DoNotOptimize(xp);
+}
+
+// CHECK-LABEL: test_pointer_lvalue:
+extern "C" void test_pointer_lvalue() {
+  // CHECK: movl $42, [[DEST:.*]]
+  // CHECK: leaq [[DEST]], %rax
+  // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z+]+]])
+  // CHECK: ret
+  int x = 42;
+  int *xp = &x;
+  benchmark::DoNotOptimize(xp);
+}
diff --git a/lib/libcxx/utils/google-benchmark/test/donotoptimize_test.cc b/lib/libcxx/utils/google-benchmark/test/donotoptimize_test.cc
index b21187aadc2..2ce92d1c72b 100644
--- a/lib/libcxx/utils/google-benchmark/test/donotoptimize_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/donotoptimize_test.cc
@@ -9,16 +9,32 @@ std::uint64_t double_up(const std::uint64_t x) __attribute__((const));
 std::uint64_t double_up(const std::uint64_t x) { return x * 2; }
 }
 
+// Using DoNotOptimize on types like BitRef seem to cause a lot of problems
+// with the inline assembly on both GCC and Clang.
+struct BitRef {
+  int index;
+  unsigned char &byte;
+
+public:
+  static BitRef Make() {
+    static unsigned char arr[2] = {};
+    BitRef b(1, arr[0]);
+    return b;
+  }
+private:
+  BitRef(int i, unsigned char& b) : index(i), byte(b) {}
+};
+
 int main(int, char*[]) {
   // this test verifies compilation of DoNotOptimize() for some types
 
-  char buffer8[8];
+  char buffer8[8] = "";
   benchmark::DoNotOptimize(buffer8);
 
-  char buffer20[20];
+  char buffer20[20] = "";
   benchmark::DoNotOptimize(buffer20);
 
-  char buffer1024[1024];
+  char buffer1024[1024] = "";
   benchmark::DoNotOptimize(buffer1024);
   benchmark::DoNotOptimize(&buffer1024[0]);
 
@@ -29,5 +45,8 @@ int main(int, char*[]) {
   benchmark::DoNotOptimize(double_up(x));
 
-  return 0;
+  // These tests are to e
+  benchmark::DoNotOptimize(BitRef::Make());
+  BitRef lval = BitRef::Make();
+  benchmark::DoNotOptimize(lval);
 }
diff --git a/lib/libcxx/utils/google-benchmark/test/filter_test.cc b/lib/libcxx/utils/google-benchmark/test/filter_test.cc
index 3a205295f09..0e27065c155 100644
--- a/lib/libcxx/utils/google-benchmark/test/filter_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/filter_test.cc
@@ -36,31 +36,31 @@ class TestReporter : public benchmark::ConsoleReporter {
 }  // end namespace
 
 static void NoPrefix(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(NoPrefix);
 
 static void BM_Foo(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_Foo);
 
 static void BM_Bar(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_Bar);
 
 static void BM_FooBar(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_FooBar);
 
 static void BM_FooBa(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_FooBa);
diff --git a/lib/libcxx/utils/google-benchmark/test/fixture_test.cc b/lib/libcxx/utils/google-benchmark/test/fixture_test.cc
index bbc2f957902..1462b10f02f 100644
--- a/lib/libcxx/utils/google-benchmark/test/fixture_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/fixture_test.cc
@@ -28,7 +28,7 @@ class MyFixture : public ::benchmark::Fixture {
 BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) {
   assert(data.get() != nullptr);
   assert(*data == 42);
-  while (st.KeepRunning()) {
+  for (auto _ : st) {
   }
 }
 
@@ -37,7 +37,7 @@ BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) {
     assert(data.get() != nullptr);
     assert(*data == 42);
   }
-  while (st.KeepRunning()) {
+  for (auto _ : st) {
     assert(data.get() != nullptr);
     assert(*data == 42);
   }
@@ -46,4 +46,4 @@ BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) {
 BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42);
 BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42)->ThreadPerCpu();
 
-BENCHMARK_MAIN()
+BENCHMARK_MAIN();
diff --git a/lib/libcxx/utils/google-benchmark/test/link_main_test.cc b/lib/libcxx/utils/google-benchmark/test/link_main_test.cc
new file mode 100644
index 00000000000..241ad5c3905
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/test/link_main_test.cc
@@ -0,0 +1,8 @@
+#include "benchmark/benchmark.h"
+
+void BM_empty(benchmark::State& state) {
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(state.iterations());
+  }
+}
+BENCHMARK(BM_empty);
diff --git a/lib/libcxx/utils/google-benchmark/test/map_test.cc b/lib/libcxx/utils/google-benchmark/test/map_test.cc
index 83457c9981c..dbf7982a368 100644
--- a/lib/libcxx/utils/google-benchmark/test/map_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/map_test.cc
@@ -8,7 +8,7 @@ namespace {
 std::map<int, int> ConstructRandomMap(int size) {
   std::map<int, int> m;
   for (int i = 0; i < size; ++i) {
-    m.insert(std::make_pair(rand() % size, rand() % size));
+    m.insert(std::make_pair(std::rand() % size, std::rand() % size));
   }
   return m;
 }
@@ -17,13 +17,14 @@ std::map<int, int> ConstructRandomMap(int size) {
 
 // Basic version.
 static void BM_MapLookup(benchmark::State& state) {
-  const int size = state.range(0);
-  while (state.KeepRunning()) {
+  const int size = static_cast<int>(state.range(0));
+  std::map<int, int> m;
+  for (auto _ : state) {
     state.PauseTiming();
-    std::map<int, int> m = ConstructRandomMap(size);
+    m = ConstructRandomMap(size);
     state.ResumeTiming();
     for (int i = 0; i < size; ++i) {
-      benchmark::DoNotOptimize(m.find(rand() % size));
+      benchmark::DoNotOptimize(m.find(std::rand() % size));
     }
   }
   state.SetItemsProcessed(state.iterations() * size);
@@ -34,7 +35,7 @@ BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12);
 class MapFixture : public ::benchmark::Fixture {
  public:
   void SetUp(const ::benchmark::State& st) {
-    m = ConstructRandomMap(st.range(0));
+    m = ConstructRandomMap(static_cast<int>(st.range(0)));
   }
 
   void TearDown(const ::benchmark::State&) { m.clear(); }
@@ -43,14 +44,14 @@ class MapFixture : public ::benchmark::Fixture {
 };
 
 BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) {
-  const int size = state.range(0);
-  while (state.KeepRunning()) {
+  const int size = static_cast<int>(state.range(0));
+  for (auto _ : state) {
     for (int i = 0; i < size; ++i) {
-      benchmark::DoNotOptimize(m.find(rand() % size));
+      benchmark::DoNotOptimize(m.find(std::rand() % size));
    }
   }
   state.SetItemsProcessed(state.iterations() * size);
 }
 BENCHMARK_REGISTER_F(MapFixture, Lookup)->Range(1 << 3, 1 << 12);
 
-BENCHMARK_MAIN()
+BENCHMARK_MAIN();
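map_test.cc shows both ways of keeping setup out of the measurement: BM_MapLookup rebuilds the map under PauseTiming()/ResumeTiming() inside the loop, paying the pause overhead on every iteration, while MapFixture does it once per run in SetUp(). A sketch of the fixture style on another container (not from the imported sources):

#include <set>
#include "benchmark/benchmark.h"

class SetFixture : public ::benchmark::Fixture {
 public:
  void SetUp(const ::benchmark::State& st) override {
    for (int i = 0; i < st.range(0); ++i) s.insert(i);
  }
  void TearDown(const ::benchmark::State&) override { s.clear(); }
  std::set<int> s;
};

BENCHMARK_DEFINE_F(SetFixture, Contains)(benchmark::State& state) {
  const int size = static_cast<int>(state.range(0));
  for (auto _ : state) {
    benchmark::DoNotOptimize(s.find(size / 2));  // setup cost is not timed
  }
}
BENCHMARK_REGISTER_F(SetFixture, Contains)->Range(1 << 3, 1 << 12);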
diff --git a/lib/libcxx/utils/google-benchmark/test/multiple_ranges_test.cc b/lib/libcxx/utils/google-benchmark/test/multiple_ranges_test.cc
index 8e67b3b2a99..c64acabc25c 100644
--- a/lib/libcxx/utils/google-benchmark/test/multiple_ranges_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/multiple_ranges_test.cc
@@ -1,7 +1,9 @@
 #include "benchmark/benchmark.h"
 
 #include <cassert>
+#include <iostream>
 #include <set>
+#include <vector>
 
 class MultipleRangesFixture : public ::benchmark::Fixture {
  public:
@@ -27,25 +29,46 @@ class MultipleRangesFixture : public ::benchmark::Fixture {
                         {7, 6, 3}}) {}
 
   void SetUp(const ::benchmark::State& state) {
-    std::vector<int> ranges = {state.range(0), state.range(1), state.range(2)};
+    std::vector<int64_t> ranges = {state.range(0), state.range(1),
+                                   state.range(2)};
 
     assert(expectedValues.find(ranges) != expectedValues.end());
 
     actualValues.insert(ranges);
   }
 
+  // NOTE: This is not TearDown as we want to check after _all_ runs are
+  // complete.
   virtual ~MultipleRangesFixture() {
     assert(actualValues.size() == expectedValues.size());
+    if (actualValues.size() != expectedValues.size()) {
+      std::cout << "EXPECTED\n";
+      for (auto v : expectedValues) {
+        std::cout << "{";
+        for (int64_t iv : v) {
+          std::cout << iv << ", ";
+        }
+        std::cout << "}\n";
+      }
+      std::cout << "ACTUAL\n";
+      for (auto v : actualValues) {
+        std::cout << "{";
+        for (int64_t iv : v) {
+          std::cout << iv << ", ";
+        }
+        std::cout << "}\n";
+      }
+    }
   }
 
-  std::set<std::vector<int>> expectedValues;
-  std::set<std::vector<int>> actualValues;
+  std::set<std::vector<int64_t>> expectedValues;
+  std::set<std::vector<int64_t>> actualValues;
 };
 
 BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) {
-  while (state.KeepRunning()) {
-    int product = state.range(0) * state.range(1) * state.range(2);
-    for (int x = 0; x < product; x++) {
+  for (auto _ : state) {
+    int64_t product = state.range(0) * state.range(1) * state.range(2);
+    for (int64_t x = 0; x < product; x++) {
       benchmark::DoNotOptimize(x);
     }
   }
@@ -60,15 +83,15 @@ void BM_CheckDefaultArgument(benchmark::State& state) {
   // Test that the 'range()' without an argument is the same as 'range(0)'.
   assert(state.range() == state.range(0));
   assert(state.range() != state.range(1));
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}});
 
 static void BM_MultipleRanges(benchmark::State& st) {
-  while (st.KeepRunning()) {
+  for (auto _ : st) {
   }
 }
 BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}});
 
-BENCHMARK_MAIN()
+BENCHMARK_MAIN();
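Ranges() takes one {lo, hi} pair per benchmark argument and runs the cross product of the values generated for each pair, which is exactly the set MultipleRangesFixture::expectedValues enumerates. A small sketch (not from the imported sources):

#include "benchmark/benchmark.h"

static void BM_TwoArgs(benchmark::State& state) {
  const int64_t rows = state.range(0);
  const int64_t cols = state.range(1);
  for (auto _ : state) {
    benchmark::DoNotOptimize(rows * cols);
  }
}
// With the default multiplier of 8 this expands to {8, 64, 512} x {1, 8},
// i.e. six runs: (8,1), (8,8), (64,1), (64,8), (512,1), (512,8).
BENCHMARK(BM_TwoArgs)->Ranges({{8, 512}, {1, 8}});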
diff --git a/lib/libcxx/utils/google-benchmark/test/options_test.cc b/lib/libcxx/utils/google-benchmark/test/options_test.cc
index bbbed288398..fdec69174ee 100644
--- a/lib/libcxx/utils/google-benchmark/test/options_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/options_test.cc
@@ -1,4 +1,4 @@
-#include "benchmark/benchmark_api.h"
+#include "benchmark/benchmark.h"
 
 #include <chrono>
 #include <thread>
@@ -8,13 +8,13 @@
 #include <cassert>
 
 void BM_basic(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 
 void BM_basic_slow(benchmark::State& state) {
   std::chrono::milliseconds sleep_duration(state.range(0));
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     std::this_thread::sleep_for(
         std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
   }
@@ -44,7 +44,7 @@ void CustomArgs(benchmark::internal::Benchmark* b) {
 
 BENCHMARK(BM_basic)->Apply(CustomArgs);
 
-void BM_explicit_iteration_count(benchmark::State& st) {
+void BM_explicit_iteration_count(benchmark::State& state) {
   // Test that benchmarks specified with an explicit iteration count are
   // only run once.
   static bool invoked_before = false;
@@ -52,14 +52,14 @@
   invoked_before = true;
 
   // Test that the requested iteration count is respected.
-  assert(st.max_iterations == 42);
+  assert(state.max_iterations == 42);
   size_t actual_iterations = 0;
-  while (st.KeepRunning())
+  for (auto _ : state)
     ++actual_iterations;
-  assert(st.iterations() == st.max_iterations);
-  assert(st.iterations() == 42);
+  assert(state.iterations() == state.max_iterations);
+  assert(state.iterations() == 42);
 }
 BENCHMARK(BM_explicit_iteration_count)->Iterations(42);
 
-BENCHMARK_MAIN()
+BENCHMARK_MAIN();
diff --git a/lib/libcxx/utils/google-benchmark/test/output_test.h b/lib/libcxx/utils/google-benchmark/test/output_test.h
index 57d4397ad5d..31a919991f7 100644
--- a/lib/libcxx/utils/google-benchmark/test/output_test.h
+++ b/lib/libcxx/utils/google-benchmark/test/output_test.h
@@ -2,8 +2,10 @@
 #define TEST_OUTPUT_TEST_H
 
 #undef NDEBUG
+#include <functional>
 #include <initializer_list>
 #include <memory>
+#include <sstream>
 #include <string>
 #include <utility>
 #include <vector>
@@ -59,6 +61,139 @@ int SetSubstitutions(
 void RunOutputTests(int argc, char* argv[]);
 
 // ========================================================================= //
+// ------------------------- Results checking ------------------------------ //
+// ========================================================================= //
+
+// Call this macro to register a benchmark for checking its results. This
+// should be all that's needed. It subscribes a function to check the (CSV)
+// results of a benchmark. This is done only after verifying that the output
+// strings are really as expected.
+// bm_name_pattern: a name or a regex pattern which will be matched against
+//                  all the benchmark names. Matching benchmarks
+//                  will be the subject of a call to checker_function
+// checker_function: should be of type ResultsCheckFn (see below)
+#define CHECK_BENCHMARK_RESULTS(bm_name_pattern, checker_function) \
+  size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function)
+
+struct Results;
+typedef std::function<void(Results const&)> ResultsCheckFn;
+
+size_t AddChecker(const char* bm_name_pattern, ResultsCheckFn fn);
+
+// Class holding the results of a benchmark.
+// It is passed in calls to checker functions.
+struct Results {
+  // the benchmark name
+  std::string name;
+  // the benchmark fields
+  std::map<std::string, std::string> values;
+
+  Results(const std::string& n) : name(n) {}
+
+  int NumThreads() const;
+
+  double NumIterations() const;
+
+  typedef enum { kCpuTime, kRealTime } BenchmarkTime;
+
+  // get cpu_time or real_time in seconds
+  double GetTime(BenchmarkTime which) const;
+
+  // get the real_time duration of the benchmark in seconds.
+  // it is better to use fuzzy float checks for this, as the float
+  // ASCII formatting is lossy.
+  double DurationRealTime() const {
+    return NumIterations() * GetTime(kRealTime);
+  }
+  // get the cpu_time duration of the benchmark in seconds
+  double DurationCPUTime() const {
+    return NumIterations() * GetTime(kCpuTime);
+  }
+
+  // get the string for a result by name, or nullptr if the name
+  // is not found
+  const std::string* Get(const char* entry_name) const {
+    auto it = values.find(entry_name);
+    if (it == values.end()) return nullptr;
+    return &it->second;
+  }
+
+  // get a result by name, parsed as a specific type.
+  // NOTE: for counters, use GetCounterAs instead.
+  template <class T>
+  T GetAs(const char* entry_name) const;
+
+  // counters are written as doubles, so they have to be read first
+  // as a double, and only then converted to the asked type.
+  template <class T>
+  T GetCounterAs(const char* entry_name) const {
+    double dval = GetAs<double>(entry_name);
+    T tval = static_cast<T>(dval);
+    return tval;
+  }
+};
+
+template <class T>
+T Results::GetAs(const char* entry_name) const {
+  auto* sv = Get(entry_name);
+  CHECK(sv != nullptr && !sv->empty());
+  std::stringstream ss;
+  ss << *sv;
+  T out;
+  ss >> out;
+  CHECK(!ss.fail());
+  return out;
+}
+
+//----------------------------------
+// Macros to help in result checking. Do not use them with arguments causing
+// side-effects.
+
+// clang-format off
+
+#define _CHECK_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value) \
+    CONCAT(CHECK_, relationship)                                        \
+    (entry.getfn< var_type >(var_name), (value)) << "\n"                \
+    << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n"     \
+    << __FILE__ << ":" << __LINE__ << ": "                              \
+    << "expected (" << #var_type << ")" << (var_name)                   \
+    << "=" << (entry).getfn< var_type >(var_name)                       \
+    << " to be " #relationship " to " << (value) << "\n"
+
+// check with tolerance. eps_factor is the tolerance window, which is
+// interpreted relative to value (eg, 0.1 means 10% of value).
+#define _CHECK_FLOAT_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value, eps_factor) \
+    CONCAT(CHECK_FLOAT_, relationship)                                  \
+    (entry.getfn< var_type >(var_name), (value), (eps_factor) * (value)) << "\n" \
+    << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n"     \
+    << __FILE__ << ":" << __LINE__ << ": "                              \
+    << "expected (" << #var_type << ")" << (var_name)                   \
+    << "=" << (entry).getfn< var_type >(var_name)                       \
+    << " to be " #relationship " to " << (value) << "\n"                \
+    << __FILE__ << ":" << __LINE__ << ": "                              \
+    << "with tolerance of " << (eps_factor) * (value)                   \
+    << " (" << (eps_factor)*100. << "%), "                              \
+    << "but delta was " << ((entry).getfn< var_type >(var_name) - (value)) \
+    << " (" << (((entry).getfn< var_type >(var_name) - (value))         \
+               /                                                        \
+               ((value) > 1.e-5 || value < -1.e-5 ? value : 1.e-5)*100.) \
+    << "%)"
+
+#define CHECK_RESULT_VALUE(entry, var_type, var_name, relationship, value) \
+    _CHECK_RESULT_VALUE(entry, GetAs, var_type, var_name, relationship, value)
+
+#define CHECK_COUNTER_VALUE(entry, var_type, var_name, relationship, value) \
+    _CHECK_RESULT_VALUE(entry, GetCounterAs, var_type, var_name, relationship, value)
+
+#define CHECK_FLOAT_RESULT_VALUE(entry, var_name, relationship, value, eps_factor) \
+    _CHECK_FLOAT_RESULT_VALUE(entry, GetAs, double, var_name, relationship, value, eps_factor)
+
+#define CHECK_FLOAT_COUNTER_VALUE(entry, var_name, relationship, value, eps_factor) \
+    _CHECK_FLOAT_RESULT_VALUE(entry, GetCounterAs, double, var_name, relationship, value, eps_factor)
+
+// clang-format on
+
+// ========================================================================= //
 // --------------------------- Misc Utilities ------------------------------ //
 // ========================================================================= //
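Putting the new machinery together: a test registers a benchmark, optionally asserts on the textual output with ADD_CASES, and subscribes a checker that receives the parsed CSV fields once the textual checks have passed. A usage sketch (not from the imported sources) along the lines the macro comment above describes:

#include "benchmark/benchmark.h"
#include "output_test.h"

void BM_Checked(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_Checked);
ADD_CASES(TC_CSVOut, {{"^\"BM_Checked\",%csv_report$"}});

// Runs only after the output strings matched; 'e' holds the CSV fields.
void CheckBasic(Results const& e) {
  CHECK_GT(e.NumIterations(), 0.0);
  CHECK_GT(e.DurationRealTime(), 0.0);
}
CHECK_BENCHMARK_RESULTS("BM_Checked", &CheckBasic);

int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }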
#include "../src/re.h" // NOTE: re.h is for internal use only #include "output_test.h" @@ -31,22 +33,32 @@ TestCaseList& GetTestCaseList(TestCaseID ID) { SubMap& GetSubstitutions() { // Don't use 'dec_re' from header because it may not yet be initialized. + // clang-format off static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"; static SubMap map = { {"%float", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"}, + // human-readable float + {"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kMGTPEZYmunpfazy]?"}, {"%int", "[ ]*[0-9]+"}, {" %s ", "[ ]+"}, - {"%time", "[ ]*[0-9]{1,5} ns"}, - {"%console_report", "[ ]*[0-9]{1,5} ns [ ]*[0-9]{1,5} ns [ ]*[0-9]+"}, + {"%time", "[ ]*[0-9]{1,6} ns"}, + {"%console_report", "[ ]*[0-9]{1,6} ns [ ]*[0-9]{1,6} ns [ ]*[0-9]+"}, {"%console_us_report", "[ ]*[0-9] us [ ]*[0-9] us [ ]*[0-9]+"}, + {"%csv_header", + "name,iterations,real_time,cpu_time,time_unit,bytes_per_second," + "items_per_second,label,error_occurred,error_message"}, {"%csv_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,,,"}, {"%csv_us_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",us,,,,,"}, {"%csv_bytes_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + ",,,,"}, {"%csv_items_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,," + safe_dec_re + ",,,"}, + {"%csv_bytes_items_report", + "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + + "," + safe_dec_re + ",,,"}, {"%csv_label_report_begin", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,"}, {"%csv_label_report_end", ",,"}}; + // clang-format on return map; } @@ -137,11 +149,182 @@ class TestReporter : public benchmark::BenchmarkReporter { } private: - std::vector<benchmark::BenchmarkReporter *> reporters_; + std::vector<benchmark::BenchmarkReporter*> reporters_; }; +} // namespace + +} // end namespace internal + +// ========================================================================= // +// -------------------------- Results checking ----------------------------- // +// ========================================================================= // + +namespace internal { + +// Utility class to manage subscribers for checking benchmark results. +// It works by parsing the CSV output to read the results. 
+class ResultsChecker { + public: + struct PatternAndFn : public TestCase { // reusing TestCase for its regexes + PatternAndFn(const std::string& rx, ResultsCheckFn fn_) + : TestCase(rx), fn(fn_) {} + ResultsCheckFn fn; + }; + + std::vector<PatternAndFn> check_patterns; + std::vector<Results> results; + std::vector<std::string> field_names; + + void Add(const std::string& entry_pattern, ResultsCheckFn fn); + + void CheckResults(std::stringstream& output); + + private: + void SetHeader_(const std::string& csv_header); + void SetValues_(const std::string& entry_csv_line); + + std::vector<std::string> SplitCsv_(const std::string& line); +}; + +// store the static ResultsChecker in a function to prevent initialization +// order problems +ResultsChecker& GetResultsChecker() { + static ResultsChecker rc; + return rc; +} + +// add a results checker for a benchmark +void ResultsChecker::Add(const std::string& entry_pattern, ResultsCheckFn fn) { + check_patterns.emplace_back(entry_pattern, fn); +} + +// check the results of all subscribed benchmarks +void ResultsChecker::CheckResults(std::stringstream& output) { + // first reset the stream to the start + { + auto start = std::ios::streampos(0); + // clear before calling tellg() + output.clear(); + // seek to zero only when needed + if (output.tellg() > start) output.seekg(start); + // and just in case + output.clear(); + } + // now go over every line and publish it to the ResultsChecker + std::string line; + bool on_first = true; + while (output.eof() == false) { + CHECK(output.good()); + std::getline(output, line); + if (on_first) { + SetHeader_(line); // this is important + on_first = false; + continue; + } + SetValues_(line); + } + // finally we can call the subscribed check functions + for (const auto& p : check_patterns) { + VLOG(2) << "--------------------------------\n"; + VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n"; + for (const auto& r : results) { + if (!p.regex->Match(r.name)) { + VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n"; + continue; + } else { + VLOG(2) << p.regex_str << " is matched by " << r.name << "\n"; + } + VLOG(1) << "Checking results of " << r.name << ": ... 
\n"; + p.fn(r); + VLOG(1) << "Checking results of " << r.name << ": OK.\n"; + } + } +} + +// prepare for the names in this header +void ResultsChecker::SetHeader_(const std::string& csv_header) { + field_names = SplitCsv_(csv_header); +} + +// set the values for a benchmark +void ResultsChecker::SetValues_(const std::string& entry_csv_line) { + if (entry_csv_line.empty()) return; // some lines are empty + CHECK(!field_names.empty()); + auto vals = SplitCsv_(entry_csv_line); + CHECK_EQ(vals.size(), field_names.size()); + results.emplace_back(vals[0]); // vals[0] is the benchmark name + auto& entry = results.back(); + for (size_t i = 1, e = vals.size(); i < e; ++i) { + entry.values[field_names[i]] = vals[i]; + } +} + +// a quick'n'dirty csv splitter (eliminating quotes) +std::vector<std::string> ResultsChecker::SplitCsv_(const std::string& line) { + std::vector<std::string> out; + if (line.empty()) return out; + if (!field_names.empty()) out.reserve(field_names.size()); + size_t prev = 0, pos = line.find_first_of(','), curr = pos; + while (pos != line.npos) { + CHECK(curr > 0); + if (line[prev] == '"') ++prev; + if (line[curr - 1] == '"') --curr; + out.push_back(line.substr(prev, curr - prev)); + prev = pos + 1; + pos = line.find_first_of(',', pos + 1); + curr = pos; + } + curr = line.size(); + if (line[prev] == '"') ++prev; + if (line[curr - 1] == '"') --curr; + out.push_back(line.substr(prev, curr - prev)); + return out; } + } // end namespace internal +size_t AddChecker(const char* bm_name, ResultsCheckFn fn) { + auto& rc = internal::GetResultsChecker(); + rc.Add(bm_name, fn); + return rc.results.size(); +} + +int Results::NumThreads() const { + auto pos = name.find("/threads:"); + if (pos == name.npos) return 1; + auto end = name.find('/', pos + 9); + std::stringstream ss; + ss << name.substr(pos + 9, end); + int num = 1; + ss >> num; + CHECK(!ss.fail()); + return num; +} + +double Results::NumIterations() const { + return GetAs<double>("iterations"); +} + +double Results::GetTime(BenchmarkTime which) const { + CHECK(which == kCpuTime || which == kRealTime); + const char* which_str = which == kCpuTime ? 
"cpu_time" : "real_time"; + double val = GetAs<double>(which_str); + auto unit = Get("time_unit"); + CHECK(unit); + if (*unit == "ns") { + return val * 1.e-9; + } else if (*unit == "us") { + return val * 1.e-6; + } else if (*unit == "ms") { + return val * 1.e-3; + } else if (*unit == "s") { + return val; + } else { + CHECK(1 == 0) << "unknown time unit: " << *unit; + return 0; + } +} + // ========================================================================= // // -------------------------- Public API Definitions------------------------ // // ========================================================================= // @@ -152,7 +335,7 @@ TestCase::TestCase(std::string re, int rule) substituted_regex(internal::PerformSubstitutions(regex_str)), regex(std::make_shared<benchmark::Regex>()) { std::string err_str; - regex->Init(substituted_regex,& err_str); + regex->Init(substituted_regex, &err_str); CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex << "\"" << "\n originally \"" << regex_str << "\"" @@ -186,7 +369,8 @@ int SetSubstitutions( void RunOutputTests(int argc, char* argv[]) { using internal::GetTestCaseList; benchmark::Initialize(&argc, argv); - benchmark::ConsoleReporter CR(benchmark::ConsoleReporter::OO_None); + auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/ true); + benchmark::ConsoleReporter CR(options); benchmark::JSONReporter JR; benchmark::CSVReporter CSVR; struct ReporterTest { @@ -231,4 +415,11 @@ void RunOutputTests(int argc, char* argv[]) { std::cout << "\n"; } + + // now that we know the output is as expected, we can dispatch + // the checks to subscribees. + auto& csv = TestCases[2]; + // would use == but gcc spits a warning + CHECK(std::strcmp(csv.name, "CSVReporter") == 0); + internal::GetResultsChecker().CheckResults(csv.out_stream); } diff --git a/lib/libcxx/utils/google-benchmark/test/register_benchmark_test.cc b/lib/libcxx/utils/google-benchmark/test/register_benchmark_test.cc index e9f8ea530c1..18de6d68e21 100644 --- a/lib/libcxx/utils/google-benchmark/test/register_benchmark_test.cc +++ b/lib/libcxx/utils/google-benchmark/test/register_benchmark_test.cc @@ -29,6 +29,7 @@ struct TestCase { typedef benchmark::BenchmarkReporter::Run Run; void CheckRun(Run const& run) const { + // clang-format off CHECK(name == run.benchmark_name) << "expected " << name << " got " << run.benchmark_name; if (label) { @@ -37,6 +38,7 @@ struct TestCase { } else { CHECK(run.report_label == ""); } + // clang-format on } }; @@ -61,7 +63,7 @@ typedef benchmark::internal::Benchmark* ReturnVal; // Test RegisterBenchmark with no additional arguments //----------------------------------------------------------------------------// void BM_function(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_function); @@ -77,7 +79,7 @@ ADD_CASES({"BM_function"}, {"BM_function_manual_registration"}); #ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK void BM_extra_args(benchmark::State& st, const char* label) { - while (st.KeepRunning()) { + for (auto _ : st) { } st.SetLabel(label); } @@ -99,7 +101,7 @@ ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}); struct CustomFixture { void operator()(benchmark::State& st) { - while (st.KeepRunning()) { + for (auto _ : st) { } } }; @@ -114,23 +116,23 @@ void TestRegistrationAtRuntime() { #endif #ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK { - int x = 42; + const char* x = "42"; auto capturing_lam = [=](benchmark::State& st) { - while 
diff --git a/lib/libcxx/utils/google-benchmark/test/register_benchmark_test.cc b/lib/libcxx/utils/google-benchmark/test/register_benchmark_test.cc
index e9f8ea530c1..18de6d68e21 100644
--- a/lib/libcxx/utils/google-benchmark/test/register_benchmark_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/register_benchmark_test.cc
@@ -29,6 +29,7 @@ struct TestCase {
   typedef benchmark::BenchmarkReporter::Run Run;
 
   void CheckRun(Run const& run) const {
+    // clang-format off
     CHECK(name == run.benchmark_name) << "expected " << name << " got "
                                       << run.benchmark_name;
     if (label) {
@@ -37,6 +38,7 @@ struct TestCase {
     } else {
       CHECK(run.report_label == "");
     }
+    // clang-format on
   }
 };
 
@@ -61,7 +63,7 @@ typedef benchmark::internal::Benchmark* ReturnVal;
 // Test RegisterBenchmark with no additional arguments
 //----------------------------------------------------------------------------//
 void BM_function(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
   }
 }
 BENCHMARK(BM_function);
@@ -77,7 +79,7 @@ ADD_CASES({"BM_function"}, {"BM_function_manual_registration"});
 #ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
 
 void BM_extra_args(benchmark::State& st, const char* label) {
-  while (st.KeepRunning()) {
+  for (auto _ : st) {
   }
   st.SetLabel(label);
 }
@@ -99,7 +101,7 @@ ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"});
 
 struct CustomFixture {
   void operator()(benchmark::State& st) {
-    while (st.KeepRunning()) {
+    for (auto _ : st) {
     }
   }
 };
@@ -114,23 +116,23 @@ void TestRegistrationAtRuntime() {
 #endif
 #ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
   {
-    int x = 42;
+    const char* x = "42";
    auto capturing_lam = [=](benchmark::State& st) {
-      while (st.KeepRunning()) {
+      for (auto _ : st) {
       }
-      st.SetLabel(std::to_string(x));
+      st.SetLabel(x);
    };
    benchmark::RegisterBenchmark("lambda_benchmark", capturing_lam);
-    AddCases({{"lambda_benchmark", "42"}});
+    AddCases({{"lambda_benchmark", x}});
   }
 #endif
 }
 
-int main(int argc, char* argv[]) {
+// Test that all benchmarks, whether registered during static init or at
+// runtime, are run and that their results are passed to the reporter.
+void RunTestOne() {
   TestRegistrationAtRuntime();
-  benchmark::Initialize(&argc, argv);
-
   TestReporter test_reporter;
   benchmark::RunSpecifiedBenchmarks(&test_reporter);
 
@@ -143,6 +145,40 @@ int main(int argc, char* argv[]) {
     ++EB;
   }
   assert(EB == ExpectedResults.end());
+}
 
-  return 0;
+// Test that ClearRegisteredBenchmarks() clears all previously registered
+// benchmarks.
+// Also test that new benchmarks can be registered and run afterwards.
+void RunTestTwo() {
+  assert(ExpectedResults.size() != 0 &&
+         "must have at least one registered benchmark");
+  ExpectedResults.clear();
+  benchmark::ClearRegisteredBenchmarks();
+
+  TestReporter test_reporter;
+  size_t num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter);
+  assert(num_ran == 0);
+  assert(test_reporter.all_runs_.begin() == test_reporter.all_runs_.end());
+
+  TestRegistrationAtRuntime();
+  num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter);
+  assert(num_ran == ExpectedResults.size());
+
+  typedef benchmark::BenchmarkReporter::Run Run;
+  auto EB = ExpectedResults.begin();
+
+  for (Run const& run : test_reporter.all_runs_) {
+    assert(EB != ExpectedResults.end());
+    EB->CheckRun(run);
+    ++EB;
+  }
+  assert(EB == ExpectedResults.end());
+}
+
+int main(int argc, char* argv[]) {
+  benchmark::Initialize(&argc, argv);
+
+  RunTestOne();
+  RunTestTwo();
 }
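
The runtime-registration path tested above is what makes data-driven suites possible: names and captured state are only known at run time. A minimal user-side sketch (editorial example; BM_fill and the sizes are made up, and the capturing lambda requires a toolchain where the BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK guard above is not set):

#include "benchmark/benchmark.h"

#include <string>

int main(int argc, char* argv[]) {
  benchmark::Initialize(&argc, argv);
  // One registration per input size; the BENCHMARK() macro cannot build
  // names like these at run time.
  for (int size : {8, 64, 512}) {
    benchmark::RegisterBenchmark(
        ("BM_fill/" + std::to_string(size)).c_str(),
        [size](benchmark::State& st) {
          std::string s;
          for (auto _ : st) s.assign(size, 'x');
        });
  }
  benchmark::RunSpecifiedBenchmarks();
}
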
" \\(x%int\\)$" : "$"; + AddCases( + TC_ConsoleErr, + {{"L%int (Data|Instruction|Unified) %intK" + num_caches_str, MR_Next}}); + AddCases(TC_JSONOut, {{"\\{$", MR_Next}, + {"\"type\": \"", MR_Next}, + {"\"level\": %int,$", MR_Next}, + {"\"size\": %int,$", MR_Next}, + {"\"num_sharing\": %int$", MR_Next}, + {"}[,]{0,1}$", MR_Next}}); + } + + AddCases(TC_JSONOut, {{"],$"}}); + return 0; +} +int dummy_register = AddContextCases(); +ADD_CASES(TC_CSVOut, {{"%csv_header"}}); // ========================================================================= // // ------------------------ Testing Basic Output --------------------------- // // ========================================================================= // void BM_basic(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_basic); @@ -30,8 +65,8 @@ BENCHMARK(BM_basic); ADD_CASES(TC_ConsoleOut, {{"^BM_basic %console_report$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_basic\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\"$", MR_Next}, {"}", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}}); @@ -41,20 +76,20 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}}); // ========================================================================= // void BM_bytes_per_second(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } state.SetBytesProcessed(1); } BENCHMARK(BM_bytes_per_second); ADD_CASES(TC_ConsoleOut, - {{"^BM_bytes_per_second %console_report +%floatB/s$"}}); + {{"^BM_bytes_per_second %console_report +%float[kM]{0,1}B/s$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_bytes_per_second\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bytes_per_second\": %int$", MR_Next}, + {"\"bytes_per_second\": %float$", MR_Next}, {"}", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}}); @@ -63,20 +98,20 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}}); // ========================================================================= // void BM_items_per_second(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } state.SetItemsProcessed(1); } BENCHMARK(BM_items_per_second); ADD_CASES(TC_ConsoleOut, - {{"^BM_items_per_second %console_report +%float items/s$"}}); + {{"^BM_items_per_second %console_report +%float[kM]{0,1} items/s$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_items_per_second\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"items_per_second\": %int$", MR_Next}, + {"\"items_per_second\": %float$", MR_Next}, {"}", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}}); @@ -85,7 +120,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}}); // ========================================================================= // void BM_label(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } state.SetLabel("some label"); } @@ -94,8 +129,8 @@ 
BENCHMARK(BM_label); ADD_CASES(TC_ConsoleOut, {{"^BM_label %console_report some label$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_label\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, {"\"label\": \"some label\"$", MR_Next}, {"}", MR_Next}}); @@ -108,7 +143,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some " void BM_error(benchmark::State& state) { state.SkipWithError("message"); - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_error); @@ -125,7 +160,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_error\",,,,,,,,true,\"message\"$"}}); // ========================================================================= // void BM_no_arg_name(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_no_arg_name)->Arg(3); @@ -138,7 +173,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}}); // ========================================================================= // void BM_arg_name(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3); @@ -151,7 +186,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}}); // ========================================================================= // void BM_arg_names(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"}); @@ -165,7 +200,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}}); // ========================================================================= // void BM_Complexity_O1(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } state.SetComplexityN(state.range(0)); } @@ -181,30 +216,74 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"}, // Test that non-aggregate data is printed by default void BM_Repeat(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } +// need two repetitions min to be able to output any aggregate output +BENCHMARK(BM_Repeat)->Repetitions(2); +ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:2 %console_report$"}, + {"^BM_Repeat/repeats:2 %console_report$"}, + {"^BM_Repeat/repeats:2_mean %console_report$"}, + {"^BM_Repeat/repeats:2_median %console_report$"}, + {"^BM_Repeat/repeats:2_stddev %console_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:2\",$"}, + {"\"name\": \"BM_Repeat/repeats:2\",$"}, + {"\"name\": \"BM_Repeat/repeats:2_mean\",$"}, + {"\"name\": \"BM_Repeat/repeats:2_median\",$"}, + {"\"name\": \"BM_Repeat/repeats:2_stddev\",$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"}, + {"^\"BM_Repeat/repeats:2\",%csv_report$"}, + {"^\"BM_Repeat/repeats:2_mean\",%csv_report$"}, + {"^\"BM_Repeat/repeats:2_median\",%csv_report$"}, + {"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}}); +// but for two repetitions, mean and median is the same, so let's repeat.. 
BENCHMARK(BM_Repeat)->Repetitions(3); ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:3 %console_report$"}, {"^BM_Repeat/repeats:3 %console_report$"}, {"^BM_Repeat/repeats:3 %console_report$"}, {"^BM_Repeat/repeats:3_mean %console_report$"}, + {"^BM_Repeat/repeats:3_median %console_report$"}, {"^BM_Repeat/repeats:3_stddev %console_report$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"}, {"\"name\": \"BM_Repeat/repeats:3\",$"}, {"\"name\": \"BM_Repeat/repeats:3\",$"}, {"\"name\": \"BM_Repeat/repeats:3_mean\",$"}, + {"\"name\": \"BM_Repeat/repeats:3_median\",$"}, {"\"name\": \"BM_Repeat/repeats:3_stddev\",$"}}); ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"}, {"^\"BM_Repeat/repeats:3\",%csv_report$"}, {"^\"BM_Repeat/repeats:3\",%csv_report$"}, {"^\"BM_Repeat/repeats:3_mean\",%csv_report$"}, + {"^\"BM_Repeat/repeats:3_median\",%csv_report$"}, {"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}}); +// median differs between even/odd number of repetitions, so just to be sure +BENCHMARK(BM_Repeat)->Repetitions(4); +ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4_mean %console_report$"}, + {"^BM_Repeat/repeats:4_median %console_report$"}, + {"^BM_Repeat/repeats:4_stddev %console_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"name\": \"BM_Repeat/repeats:4_mean\",$"}, + {"\"name\": \"BM_Repeat/repeats:4_median\",$"}, + {"\"name\": \"BM_Repeat/repeats:4_stddev\",$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4_mean\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4_median\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4_stddev\",%csv_report$"}}); // Test that a non-repeated test still prints non-aggregate results even when // only-aggregate reports have been requested void BM_RepeatOnce(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly(); @@ -214,23 +293,26 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}}); // Test that non-aggregate data is not reported void BM_SummaryRepeat(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly(); ADD_CASES(TC_ConsoleOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, {"^BM_SummaryRepeat/repeats:3_mean %console_report$"}, + {"^BM_SummaryRepeat/repeats:3_median %console_report$"}, {"^BM_SummaryRepeat/repeats:3_stddev %console_report$"}}); ADD_CASES(TC_JSONOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, {"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"}, + {"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"}, {"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"}}); ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, {"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"}, + {"^\"BM_SummaryRepeat/repeats:3_median\",%csv_report$"}, {"^\"BM_SummaryRepeat/repeats:3_stddev\",%csv_report$"}}); void BM_RepeatTimeUnit(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) 
{ } } BENCHMARK(BM_RepeatTimeUnit) @@ -240,18 +322,63 @@ BENCHMARK(BM_RepeatTimeUnit) ADD_CASES(TC_ConsoleOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not}, {"^BM_RepeatTimeUnit/repeats:3_mean %console_us_report$"}, + {"^BM_RepeatTimeUnit/repeats:3_median %console_us_report$"}, {"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_report$"}}); ADD_CASES(TC_JSONOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not}, {"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"}, {"\"time_unit\": \"us\",?$"}, + {"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"}, + {"\"time_unit\": \"us\",?$"}, {"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"}, {"\"time_unit\": \"us\",?$"}}); ADD_CASES(TC_CSVOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not}, {"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"}, + {"^\"BM_RepeatTimeUnit/repeats:3_median\",%csv_us_report$"}, {"^\"BM_RepeatTimeUnit/repeats:3_stddev\",%csv_us_report$"}}); // ========================================================================= // +// -------------------- Testing user-provided statistics ------------------- // +// ========================================================================= // + +const auto UserStatistics = [](const std::vector<double>& v) { + return v.back(); +}; +void BM_UserStats(benchmark::State& state) { + for (auto _ : state) { + } +} +// clang-format off +BENCHMARK(BM_UserStats) + ->Repetitions(3) + ->ComputeStatistics("", UserStatistics); +// clang-format on + +// check that user-provided stats is calculated, and is after the default-ones +// empty string as name is intentional, it would sort before anything else +ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/repeats:3 %console_report$"}, + {"^BM_UserStats/repeats:3 %console_report$"}, + {"^BM_UserStats/repeats:3 %console_report$"}, + {"^BM_UserStats/repeats:3_mean %console_report$"}, + {"^BM_UserStats/repeats:3_median %console_report$"}, + {"^BM_UserStats/repeats:3_stddev %console_report$"}, + {"^BM_UserStats/repeats:3_ %console_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_UserStats/repeats:3\",$"}, + {"\"name\": \"BM_UserStats/repeats:3\",$"}, + {"\"name\": \"BM_UserStats/repeats:3\",$"}, + {"\"name\": \"BM_UserStats/repeats:3_mean\",$"}, + {"\"name\": \"BM_UserStats/repeats:3_median\",$"}, + {"\"name\": \"BM_UserStats/repeats:3_stddev\",$"}, + {"\"name\": \"BM_UserStats/repeats:3_\",$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_UserStats/repeats:3\",%csv_report$"}, + {"^\"BM_UserStats/repeats:3\",%csv_report$"}, + {"^\"BM_UserStats/repeats:3\",%csv_report$"}, + {"^\"BM_UserStats/repeats:3_mean\",%csv_report$"}, + {"^\"BM_UserStats/repeats:3_median\",%csv_report$"}, + {"^\"BM_UserStats/repeats:3_stddev\",%csv_report$"}, + {"^\"BM_UserStats/repeats:3_\",%csv_report$"}}); + +// ========================================================================= // // --------------------------- TEST CASES END ------------------------------ // // ========================================================================= // diff --git a/lib/libcxx/utils/google-benchmark/test/skip_with_error_test.cc b/lib/libcxx/utils/google-benchmark/test/skip_with_error_test.cc index b74d33c5899..39785fb7f6d 100644 --- a/lib/libcxx/utils/google-benchmark/test/skip_with_error_test.cc +++ b/lib/libcxx/utils/google-benchmark/test/skip_with_error_test.cc @@ -33,8 +33,8 @@ struct TestCase { typedef benchmark::BenchmarkReporter::Run Run; void CheckRun(Run const& run) const { - CHECK(name == run.benchmark_name) << "expected " << name << " got " - << run.benchmark_name; + CHECK(name == 
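
ComputeStatistics, exercised above with an empty name, attaches an extra reducer that runs over the vector of per-repetition results and is reported alongside mean/median/stddev. A minimal user-side sketch computing a max statistic (editorial example, not part of the patch; note the callable must convert to a plain function pointer, which a capture-less lambda does):

#include "benchmark/benchmark.h"

#include <algorithm>
#include <vector>

void BM_spin(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());
  }
}
// Reported as BM_spin/repeats:4_max next to _mean, _median and _stddev.
BENCHMARK(BM_spin)->Repetitions(4)->ComputeStatistics(
    "max", [](const std::vector<double>& v) {
      return *std::max_element(v.begin(), v.end());
    });

BENCHMARK_MAIN();
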
diff --git a/lib/libcxx/utils/google-benchmark/test/skip_with_error_test.cc b/lib/libcxx/utils/google-benchmark/test/skip_with_error_test.cc
index b74d33c5899..39785fb7f6d 100644
--- a/lib/libcxx/utils/google-benchmark/test/skip_with_error_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/skip_with_error_test.cc
@@ -33,8 +33,8 @@ struct TestCase {
   typedef benchmark::BenchmarkReporter::Run Run;
 
   void CheckRun(Run const& run) const {
-    CHECK(name == run.benchmark_name) << "expected " << name << " got "
-                                      << run.benchmark_name;
+    CHECK(name == run.benchmark_name)
+        << "expected " << name << " got " << run.benchmark_name;
     CHECK(error_occurred == run.error_occurred);
     CHECK(error_message == run.error_message);
     if (error_occurred) {
@@ -70,6 +70,24 @@ void BM_error_before_running(benchmark::State& state) {
 BENCHMARK(BM_error_before_running);
 ADD_CASES("BM_error_before_running", {{"", true, "error message"}});
 
+void BM_error_before_running_batch(benchmark::State& state) {
+  state.SkipWithError("error message");
+  while (state.KeepRunningBatch(17)) {
+    assert(false);
+  }
+}
+BENCHMARK(BM_error_before_running_batch);
+ADD_CASES("BM_error_before_running_batch", {{"", true, "error message"}});
+
+void BM_error_before_running_range_for(benchmark::State& state) {
+  state.SkipWithError("error message");
+  for (auto _ : state) {
+    assert(false);
+  }
+}
+BENCHMARK(BM_error_before_running_range_for);
+ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}});
+
 void BM_error_during_running(benchmark::State& state) {
   int first_iter = true;
   while (state.KeepRunning()) {
@@ -93,8 +111,29 @@ ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"},
                                       {"/2/threads:4", false, ""},
                                       {"/2/threads:8", false, ""}});
 
+void BM_error_during_running_ranged_for(benchmark::State& state) {
+  assert(state.max_iterations > 3 && "test requires at least a few iterations");
+  bool first_iter = true;
+  // NOTE: Users should not write the for loop explicitly.
+  for (auto It = state.begin(), End = state.end(); It != End; ++It) {
+    if (state.range(0) == 1) {
+      assert(first_iter);
+      first_iter = false;
+      state.SkipWithError("error message");
+      // Test the unfortunate but documented behavior that the ranged-for loop
+      // doesn't automatically terminate when SkipWithError is set.
+      assert(++It != End);
+      break;  // Required behavior
+    }
+  }
+}
+BENCHMARK(BM_error_during_running_ranged_for)->Arg(1)->Arg(2)->Iterations(5);
+ADD_CASES("BM_error_during_running_ranged_for",
+          {{"/1/iterations:5", true, "error message"},
+           {"/2/iterations:5", false, ""}});
+
 void BM_error_after_running(benchmark::State& state) {
-  while (state.KeepRunning()) {
+  for (auto _ : state) {
     benchmark::DoNotOptimize(state.iterations());
   }
   if (state.thread_index <= (state.threads / 2))
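
The new cases above pin down the SkipWithError contract: KeepRunning() and KeepRunningBatch() return false on the next call after an error is set, but a ranged-for loop does not terminate by itself, so user code must break or return. A minimal sketch of the intended idiom (editorial example; the file path is a deliberate stand-in):

#include "benchmark/benchmark.h"

#include <cstdio>

void BM_read_file(benchmark::State& state) {
  std::FILE* f = std::fopen("/path/that/does/not/exist", "rb");
  if (f == nullptr) {
    state.SkipWithError("could not open input file");
    return;  // with for (auto _ : state), leave explicitly after skipping
  }
  for (auto _ : state) {
    // ... measured work on f ...
  }
  std::fclose(f);
}
BENCHMARK(BM_read_file);
BENCHMARK_MAIN();
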
diff --git a/lib/libcxx/utils/google-benchmark/test/state_assembly_test.cc b/lib/libcxx/utils/google-benchmark/test/state_assembly_test.cc
new file mode 100644
index 00000000000..abe9a4ddb56
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/test/state_assembly_test.cc
@@ -0,0 +1,68 @@
+#include <benchmark/benchmark.h>
+
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wreturn-type"
+#endif
+
+// clang-format off
+extern "C" {
+  extern int ExternInt;
+  benchmark::State& GetState();
+  void Fn();
+}
+// clang-format on
+
+using benchmark::State;
+
+// CHECK-LABEL: test_for_auto_loop:
+extern "C" int test_for_auto_loop() {
+  State& S = GetState();
+  int x = 42;
+  // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
+  // CHECK-NEXT: testq %rbx, %rbx
+  // CHECK-NEXT: je [[LOOP_END:.*]]
+
+  for (auto _ : S) {
+    // CHECK: .L[[LOOP_HEAD:[a-zA-Z0-9_]+]]:
+    // CHECK-GNU-NEXT: subq $1, %rbx
+    // CHECK-CLANG-NEXT: {{(addq \$1,|incq)}} %rax
+    // CHECK-NEXT: jne .L[[LOOP_HEAD]]
+    benchmark::DoNotOptimize(x);
+  }
+  // CHECK: [[LOOP_END]]:
+  // CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv
+
+  // CHECK: movl $101, %eax
+  // CHECK: ret
+  return 101;
+}
+
+// CHECK-LABEL: test_while_loop:
+extern "C" int test_while_loop() {
+  State& S = GetState();
+  int x = 42;
+
+  // CHECK: j{{(e|mp)}} .L[[LOOP_HEADER:[a-zA-Z0-9_]+]]
+  // CHECK-NEXT: .L[[LOOP_BODY:[a-zA-Z0-9_]+]]:
+  while (S.KeepRunning()) {
+    // CHECK-GNU-NEXT: subq $1, %[[IREG:[a-z]+]]
+    // CHECK-CLANG-NEXT: {{(addq \$-1,|decq)}} %[[IREG:[a-z]+]]
+    // CHECK: movq %[[IREG]], [[DEST:.*]]
+    benchmark::DoNotOptimize(x);
+  }
+  // CHECK-DAG: movq [[DEST]], %[[IREG]]
+  // CHECK-DAG: testq %[[IREG]], %[[IREG]]
+  // CHECK-DAG: jne .L[[LOOP_BODY]]
+  // CHECK-DAG: .L[[LOOP_HEADER]]:
+
+  // CHECK: cmpb $0
+  // CHECK-NEXT: jne .L[[LOOP_END:[a-zA-Z0-9_]+]]
+  // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
+
+  // CHECK: .L[[LOOP_END]]:
+  // CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv
+
+  // CHECK: movl $101, %eax
+  // CHECK: ret
+  return 101;
+}
diff --git a/lib/libcxx/utils/google-benchmark/test/statistics_gtest.cc b/lib/libcxx/utils/google-benchmark/test/statistics_gtest.cc
new file mode 100644
index 00000000000..99e314920c5
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/test/statistics_gtest.cc
@@ -0,0 +1,28 @@
+//===---------------------------------------------------------------------===//
+// statistics_test - Unit tests for src/statistics.cc
+//===---------------------------------------------------------------------===//
+
+#include "../src/statistics.h"
+#include "gtest/gtest.h"
+
+namespace {
+TEST(StatisticsTest, Mean) {
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({42, 42, 42, 42}), 42.0);
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 3, 4}), 2.5);
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 5, 10, 10, 14}), 7.0);
+}
+
+TEST(StatisticsTest, Median) {
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({42, 42, 42, 42}), 42.0);
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 3, 4}), 2.5);
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 5, 10, 10}), 5.0);
+}
+
+TEST(StatisticsTest, StdDev) {
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({101, 101, 101, 101}), 0.0);
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({1, 2, 3}), 1.0);
+  EXPECT_FLOAT_EQ(benchmark::StatisticsStdDev({1.5, 2.4, 3.3, 4.2, 5.1}),
+                  1.42302495);
+}
+
+}  // end namespace
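
The StdDev expectations above also pin down which estimator is used: for {1, 2, 3} the mean is 2 and the sum of squared deviations is 1 + 0 + 1 = 2, so only the sample form sqrt(2 / (3 - 1)) = 1.0 matches, not the population form sqrt(2 / 3). A standalone hand-check (editorial example in plain C++):

#include <cassert>
#include <cmath>
#include <vector>

// Sample standard deviation with the (n - 1) denominator, matching the
// expected values in statistics_gtest.cc above.
static double SampleStdDev(const std::vector<double>& v) {
  double mean = 0;
  for (double x : v) mean += x;
  mean /= v.size();
  double ss = 0;
  for (double x : v) ss += (x - mean) * (x - mean);
  return std::sqrt(ss / (v.size() - 1));
}

int main() { assert(SampleStdDev({1, 2, 3}) == 1.0); }
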
diff --git a/lib/libcxx/utils/google-benchmark/test/string_util_gtest.cc b/lib/libcxx/utils/google-benchmark/test/string_util_gtest.cc
new file mode 100644
index 00000000000..4c81734cf8a
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/test/string_util_gtest.cc
@@ -0,0 +1,146 @@
+//===---------------------------------------------------------------------===//
+// string_util_test - Unit tests for src/string_util.cc
+//===---------------------------------------------------------------------===//
+
+#include "../src/string_util.h"
+#include "gtest/gtest.h"
+
+namespace {
+TEST(StringUtilTest, stoul) {
+  {
+    size_t pos = 0;
+    EXPECT_EQ(0, benchmark::stoul("0", &pos));
+    EXPECT_EQ(1, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(7, benchmark::stoul("7", &pos));
+    EXPECT_EQ(1, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(135, benchmark::stoul("135", &pos));
+    EXPECT_EQ(3, pos);
+  }
+#if ULONG_MAX == 0xFFFFFFFFul
+  {
+    size_t pos = 0;
+    EXPECT_EQ(0xFFFFFFFFul, benchmark::stoul("4294967295", &pos));
+    EXPECT_EQ(10, pos);
+  }
+#elif ULONG_MAX == 0xFFFFFFFFFFFFFFFFul
+  {
+    size_t pos = 0;
+    EXPECT_EQ(0xFFFFFFFFFFFFFFFFul,
+              benchmark::stoul("18446744073709551615", &pos));
+    EXPECT_EQ(20, pos);
+  }
+#endif
+  {
+    size_t pos = 0;
+    EXPECT_EQ(10, benchmark::stoul("1010", &pos, 2));
+    EXPECT_EQ(4, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(520, benchmark::stoul("1010", &pos, 8));
+    EXPECT_EQ(4, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(1010, benchmark::stoul("1010", &pos, 10));
+    EXPECT_EQ(4, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(4112, benchmark::stoul("1010", &pos, 16));
+    EXPECT_EQ(4, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(0xBEEF, benchmark::stoul("BEEF", &pos, 16));
+    EXPECT_EQ(4, pos);
+  }
+  {
+    ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument);
+  }
+}
+
+TEST(StringUtilTest, stoi) {
+  {
+    size_t pos = 0;
+    EXPECT_EQ(0, benchmark::stoi("0", &pos));
+    EXPECT_EQ(1, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(-17, benchmark::stoi("-17", &pos));
+    EXPECT_EQ(3, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(1357, benchmark::stoi("1357", &pos));
+    EXPECT_EQ(4, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2));
+    EXPECT_EQ(4, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8));
+    EXPECT_EQ(4, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10));
+    EXPECT_EQ(4, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16));
+    EXPECT_EQ(4, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16));
+    EXPECT_EQ(4, pos);
+  }
+  {
+    ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument);
+  }
+}
+
+TEST(StringUtilTest, stod) {
+  {
+    size_t pos = 0;
+    EXPECT_EQ(0.0, benchmark::stod("0", &pos));
+    EXPECT_EQ(1, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(-84.0, benchmark::stod("-84", &pos));
+    EXPECT_EQ(3, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(1234.0, benchmark::stod("1234", &pos));
+    EXPECT_EQ(4, pos);
+  }
+  {
+    size_t pos = 0;
+    EXPECT_EQ(1.5, benchmark::stod("1.5", &pos));
+    EXPECT_EQ(3, pos);
+  }
+  {
+    size_t pos = 0;
+    /* Note: exactly representable as double */
+    EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos));
+    EXPECT_EQ(8, pos);
+  }
+  {
+    ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument);
+  }
+}
+
+}  // end namespace
diff --git a/lib/libcxx/utils/google-benchmark/test/templated_fixture_test.cc b/lib/libcxx/utils/google-benchmark/test/templated_fixture_test.cc
new file mode 100644
index 00000000000..fe9865cc776
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/test/templated_fixture_test.cc
@@ -0,0 +1,28 @@
+
+#include "benchmark/benchmark.h"
+
+#include <cassert>
+#include <memory>
+
+template <typename T>
+class MyFixture : public ::benchmark::Fixture {
+ public:
+  MyFixture() : data(0) {}
+
+  T data;
+};
+
+BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State& st) {
+  for (auto _ : st) {
+    data += 1;
+  }
+}
+
+BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State& st) {
+  for (auto _ : st) {
+    data += 1.0;
+  }
+}
+BENCHMARK_REGISTER_F(MyFixture, Bar);
+
+BENCHMARK_MAIN();
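
The last two files in this import exercise user counters. For reference, a minimal sketch of the API they drive (editorial example, public API only): plain assignment creates a default counter, while the Counter constructor takes flags such as kAvgThreads, which averages the value across threads.

#include "benchmark/benchmark.h"

void BM_with_counters(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters["Foo"] = 1;  // default counter, reported as-is
  state.counters["Bar"] = bm::Counter{2, bm::Counter::kAvgThreads};
}
BENCHMARK(BM_with_counters)->Threads(4);
BENCHMARK_MAIN();
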
diff --git a/lib/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc b/lib/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc
new file mode 100644
index 00000000000..4f126b6d978
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc
@@ -0,0 +1,252 @@
+
+#undef NDEBUG
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+// @todo: <jpmag> this checks the full output at once; the rule for
+// CounterSet1 was failing because it was not matching "^[-]+$".
+// @todo: <jpmag> check that the counters are vertically aligned.
+ADD_CASES(
+    TC_ConsoleOut,
+    {
+        // keeping these lines long improves readability, so:
+        // clang-format off
+        {"^[-]+$", MR_Next},
+        {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Bat %s Baz %s Foo %s Frob %s Lob$", MR_Next},
+        {"^[-]+$", MR_Next},
+        {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
+        {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
+        {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
+        {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
+        {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
+        {"^[-]+$", MR_Next},
+        {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Baz %s Foo$", MR_Next},
+        {"^[-]+$", MR_Next},
+        {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^[-]+$", MR_Next},
+        {"^Benchmark %s Time %s CPU %s Iterations %s Bat %s Baz %s Foo$", MR_Next},
+        {"^[-]+$", MR_Next},
+        {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+        {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$"},
+        // clang-format on
+    });
+ADD_CASES(TC_CSVOut, {{"%csv_header,"
+                       "\"Bar\",\"Bat\",\"Baz\",\"Foo\",\"Frob\",\"Lob\""}});
+
+// ========================================================================= //
+// ------------------------- Tabular Counters Output ----------------------- //
+// ========================================================================= //
+
+void BM_Counters_Tabular(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+  namespace bm = benchmark;
+  state.counters.insert({
+      {"Foo", {1, bm::Counter::kAvgThreads}},
+      {"Bar", {2, bm::Counter::kAvgThreads}},
+      {"Baz", {4, bm::Counter::kAvgThreads}},
+      {"Bat", {8, bm::Counter::kAvgThreads}},
+      {"Frob", {16, bm::Counter::kAvgThreads}},
+      {"Lob", {32, bm::Counter::kAvgThreads}},
+  });
+}
+BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16);
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"Bar\": %float,$", MR_Next},
+                       {"\"Bat\": %float,$", MR_Next},
+                       {"\"Baz\": %float,$", MR_Next},
+                       {"\"Foo\": %float,$", MR_Next},
+                       {"\"Frob\": %float,$", MR_Next},
+                       {"\"Lob\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Tabular/threads:%int\",%csv_report,"
+                      "%float,%float,%float,%float,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckTabular(Results const& e) {
+  CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 1);
+  CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 2);
+  CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 4);
+  CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 8);
+  CHECK_COUNTER_VALUE(e, int, "Frob", EQ, 16);
+  CHECK_COUNTER_VALUE(e, int, "Lob", EQ, 32);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular);
+
+// ========================================================================= //
+// -------------------- Tabular+Rate Counters Output ----------------------- //
+// ========================================================================= //
+
+void BM_CounterRates_Tabular(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+  namespace bm = benchmark;
+  state.counters.insert({
+      {"Foo", {1, bm::Counter::kAvgThreadsRate}},
+      {"Bar", {2, bm::Counter::kAvgThreadsRate}},
+      {"Baz", {4, bm::Counter::kAvgThreadsRate}},
+      {"Bat", {8, bm::Counter::kAvgThreadsRate}},
+      {"Frob", {16, bm::Counter::kAvgThreadsRate}},
+      {"Lob", {32, bm::Counter::kAvgThreadsRate}},
+  });
+}
+BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16);
+ADD_CASES(TC_JSONOut,
+          {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
+           {"\"iterations\": %int,$", MR_Next},
+           {"\"real_time\": %float,$", MR_Next},
+           {"\"cpu_time\": %float,$", MR_Next},
+           {"\"time_unit\": \"ns\",$", MR_Next},
+           {"\"Bar\": %float,$", MR_Next},
+           {"\"Bat\": %float,$", MR_Next},
+           {"\"Baz\": %float,$", MR_Next},
+           {"\"Foo\": %float,$", MR_Next},
+           {"\"Frob\": %float,$", MR_Next},
+           {"\"Lob\": %float$", MR_Next},
+           {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_CounterRates_Tabular/threads:%int\",%csv_report,"
+                      "%float,%float,%float,%float,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckTabularRate(Results const& e) {
+  double t = e.DurationCPUTime();
+  CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1. / t, 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2. / t, 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4. / t, 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8. / t, 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16. / t, 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32. / t, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int",
+                        &CheckTabularRate);
+
+// ========================================================================= //
+// ------------------------- Tabular Counters Output ----------------------- //
+// ========================================================================= //
+
+// set only some of the counters
+void BM_CounterSet0_Tabular(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+  namespace bm = benchmark;
+  state.counters.insert({
+      {"Foo", {10, bm::Counter::kAvgThreads}},
+      {"Bar", {20, bm::Counter::kAvgThreads}},
+      {"Baz", {40, bm::Counter::kAvgThreads}},
+  });
+}
+BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16);
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"Bar\": %float,$", MR_Next},
+                       {"\"Baz\": %float,$", MR_Next},
+                       {"\"Foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet0_Tabular/threads:%int\",%csv_report,"
+                      "%float,,%float,%float,,"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckSet0(Results const& e) {
+  CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
+  CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 20);
+  CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
+}
+CHECK_BENCHMARK_RESULTS("BM_CounterSet0_Tabular", &CheckSet0);
+
+// set only some of the counters, again
+void BM_CounterSet1_Tabular(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+  namespace bm = benchmark;
+  state.counters.insert({
+      {"Foo", {15, bm::Counter::kAvgThreads}},
+      {"Bar", {25, bm::Counter::kAvgThreads}},
+      {"Baz", {45, bm::Counter::kAvgThreads}},
+  });
+}
+BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16);
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"Bar\": %float,$", MR_Next},
+                       {"\"Baz\": %float,$", MR_Next},
+                       {"\"Foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet1_Tabular/threads:%int\",%csv_report,"
+                      "%float,,%float,%float,,"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckSet1(Results const& e) {
+  CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 15);
+  CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 25);
+  CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 45);
+}
+CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1);
+
+// ========================================================================= //
+// ------------------------- Tabular Counters Output ----------------------- //
+// ========================================================================= //
+
+// set only some of the counters, different set now.
+void BM_CounterSet2_Tabular(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+  namespace bm = benchmark;
+  state.counters.insert({
+      {"Foo", {10, bm::Counter::kAvgThreads}},
+      {"Bat", {30, bm::Counter::kAvgThreads}},
+      {"Baz", {40, bm::Counter::kAvgThreads}},
+  });
+}
+BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16);
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"Bat\": %float,$", MR_Next},
+                       {"\"Baz\": %float,$", MR_Next},
+                       {"\"Foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet2_Tabular/threads:%int\",%csv_report,"
+                      ",%float,%float,%float,,"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckSet2(Results const& e) {
+  CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
+  CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 30);
+  CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
+}
+CHECK_BENCHMARK_RESULTS("BM_CounterSet2_Tabular", &CheckSet2);
+
+// ========================================================================= //
+// --------------------------- TEST CASES END ------------------------------ //
+// ========================================================================= //
+
+int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
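
Beyond the regex cases, each benchmark above is paired with a semantic check through CHECK_BENCHMARK_RESULTS; the CHECK_FLOAT_* macros take a relative tolerance (0.001, i.e. 0.1%). A sketch of one more such checker (hypothetical; it assumes the output_test.h harness and a benchmark named BM_Example exporting a rate counter "Foo"):

#include "output_test.h"

void CheckExampleRate(Results const& e) {
  // Verify the rate counter against measured CPU time within 0.1%.
  double t = e.DurationCPUTime();
  CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Example", &CheckExampleRate);
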
diff --git a/lib/libcxx/utils/google-benchmark/test/user_counters_test.cc b/lib/libcxx/utils/google-benchmark/test/user_counters_test.cc
new file mode 100644
index 00000000000..7f7ccb9f77a
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/test/user_counters_test.cc
@@ -0,0 +1,380 @@
+
+#undef NDEBUG
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+// ========================================================================= //
+// ---------------------- Testing Prologue Output -------------------------- //
+// ========================================================================= //
+
+// clang-format off
+
+ADD_CASES(TC_ConsoleOut,
+          {{"^[-]+$", MR_Next},
+           {"^Benchmark %s Time %s CPU %s Iterations UserCounters...$", MR_Next},
+           {"^[-]+$", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}});
+
+// clang-format on
+
+// ========================================================================= //
+// ------------------------- Simple Counters Output ------------------------ //
+// ========================================================================= //
+
+void BM_Counters_Simple(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+  state.counters["foo"] = 1;
+  state.counters["bar"] = 2 * (double)state.iterations();
+}
+BENCHMARK(BM_Counters_Simple);
+ADD_CASES(TC_ConsoleOut,
+          {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"bar\": %float,$", MR_Next},
+                       {"\"foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Simple\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckSimple(Results const& e) {
+  double its = e.NumIterations();
+  CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
+  // check that the value of bar is within 0.1% of the expected value
+  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple);
+
+// ========================================================================= //
+// --------------------- Counters+Items+Bytes/s Output --------------------- //
+// ========================================================================= //
+
+namespace {
+int num_calls1 = 0;
+}
+void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+  state.counters["foo"] = 1;
+  state.counters["bar"] = ++num_calls1;
+  state.SetBytesProcessed(364);
+  state.SetItemsProcessed(150);
+}
+BENCHMARK(BM_Counters_WithBytesAndItemsPSec);
+ADD_CASES(TC_ConsoleOut,
+          {{"^BM_Counters_WithBytesAndItemsPSec %console_report "
+            "bar=%hrfloat foo=%hrfloat +%hrfloatB/s +%hrfloat items/s$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"bytes_per_second\": %float,$", MR_Next},
+                       {"\"items_per_second\": %float,$", MR_Next},
+                       {"\"bar\": %float,$", MR_Next},
+                       {"\"foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_WithBytesAndItemsPSec\","
+                      "%csv_bytes_items_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckBytesAndItemsPSec(Results const& e) {
+  double t = e.DurationCPUTime();  // this (and not real time) is the time used
+  CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
+  CHECK_COUNTER_VALUE(e, int, "bar", EQ, num_calls1);
+  // check that the values are within 0.1% of the expected values
+  CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364. / t, 0.001);
+  CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150. / t, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec",
+                        &CheckBytesAndItemsPSec);
+
+// ========================================================================= //
+// ------------------------- Rate Counters Output -------------------------- //
+// ========================================================================= //
+
+void BM_Counters_Rate(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+  namespace bm = benchmark;
+  state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate};
+  state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate};
+}
+BENCHMARK(BM_Counters_Rate);
+ADD_CASES(
+    TC_ConsoleOut,
+    {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"bar\": %float,$", MR_Next},
+                       {"\"foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Rate\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckRate(Results const& e) {
+  double t = e.DurationCPUTime();  // this (and not real time) is the time used
+  // check that the values are within 0.1% of the expected values
+  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / t, 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / t, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
+
+// ========================================================================= //
+// ------------------------- Thread Counters Output ------------------------ //
+// ========================================================================= //
+
+void BM_Counters_Threads(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+  state.counters["foo"] = 1;
+  state.counters["bar"] = 2;
+}
+BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report "
+                           "bar=%hrfloat foo=%hrfloat$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"bar\": %float,$", MR_Next},
+                       {"\"foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(
+    TC_CSVOut,
+    {{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckThreads(Results const& e) {
+  CHECK_COUNTER_VALUE(e, int, "foo", EQ, e.NumThreads());
+  CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2 * e.NumThreads());
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_Threads/threads:%int", &CheckThreads);
+
+// ========================================================================= //
+// ---------------------- ThreadAvg Counters Output ------------------------ //
+// ========================================================================= //
+
+void BM_Counters_AvgThreads(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+  namespace bm = benchmark;
+  state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads};
+  state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreads};
+}
+BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int "
+                           "%console_report bar=%hrfloat foo=%hrfloat$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"bar\": %float,$", MR_Next},
+                       {"\"foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(
+    TC_CSVOut,
+    {{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckAvgThreads(Results const& e) {
+  CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
+  CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int",
+                        &CheckAvgThreads);
+
+// ========================================================================= //
+// -------------------- ThreadAvgRate Counters Output ---------------------- //
+// ========================================================================= //
+
+void BM_Counters_AvgThreadsRate(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+  namespace bm = benchmark;
+  state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate};
+  state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreadsRate};
+}
+BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int "
+                           "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
+ADD_CASES(TC_JSONOut,
+          {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
+           {"\"iterations\": %int,$", MR_Next},
+           {"\"real_time\": %float,$", MR_Next},
+           {"\"cpu_time\": %float,$", MR_Next},
+           {"\"time_unit\": \"ns\",$", MR_Next},
+           {"\"bar\": %float,$", MR_Next},
+           {"\"foo\": %float$", MR_Next},
+           {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/"
+                      "threads:%int\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckAvgThreadsRate(Results const& e) {
+  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / e.DurationCPUTime(), 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / e.DurationCPUTime(), 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int",
+                        &CheckAvgThreadsRate);
+
+// ========================================================================= //
+// ------------------- IterationInvariant Counters Output ------------------ //
+// ========================================================================= //
+
+void BM_Counters_IterationInvariant(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+  namespace bm = benchmark;
+  state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariant};
+  state.counters["bar"] = bm::Counter{2, bm::Counter::kIsIterationInvariant};
+}
+BENCHMARK(BM_Counters_IterationInvariant);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_IterationInvariant %console_report "
+                           "bar=%hrfloat foo=%hrfloat$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_IterationInvariant\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"bar\": %float,$", MR_Next},
+                       {"\"foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(
+    TC_CSVOut,
+    {{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckIterationInvariant(Results const& e) {
+  double its = e.NumIterations();
+  // check that the values are within 0.1% of the expected value
+  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its, 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant",
+                        &CheckIterationInvariant);
+
+// ========================================================================= //
+// ----------------- IterationInvariantRate Counters Output ---------------- //
+// ========================================================================= //
+
+void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+  namespace bm = benchmark;
+  state.counters["foo"] =
+      bm::Counter{1, bm::Counter::kIsIterationInvariantRate};
+  state.counters["bar"] =
+      bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kIsIterationInvariant};
+}
+BENCHMARK(BM_Counters_kIsIterationInvariantRate);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kIsIterationInvariantRate "
+                           "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
+ADD_CASES(TC_JSONOut,
+          {{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"},
+           {"\"iterations\": %int,$", MR_Next},
+           {"\"real_time\": %float,$", MR_Next},
+           {"\"cpu_time\": %float,$", MR_Next},
+           {"\"time_unit\": \"ns\",$", MR_Next},
+           {"\"bar\": %float,$", MR_Next},
+           {"\"foo\": %float$", MR_Next},
+           {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kIsIterationInvariantRate\",%csv_report,"
+                      "%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckIsIterationInvariantRate(Results const& e) {
+  double its = e.NumIterations();
+  double t = e.DurationCPUTime();  // this (and not real time) is the time used
+  // check that the values are within 0.1% of the expected values
+  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its * 1. / t, 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, its * 2. / t, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate",
+                        &CheckIsIterationInvariantRate);
+
+// ========================================================================= //
+// --------------------- AvgIterations Counters Output --------------------- //
+// ========================================================================= //
+
+void BM_Counters_AvgIterations(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+  namespace bm = benchmark;
+  state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterations};
+  state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgIterations};
+}
+BENCHMARK(BM_Counters_AvgIterations);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgIterations %console_report "
+                           "bar=%hrfloat foo=%hrfloat$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgIterations\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"bar\": %float,$", MR_Next},
+                       {"\"foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut,
+          {{"^\"BM_Counters_AvgIterations\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckAvgIterations(Results const& e) {
+  double its = e.NumIterations();
+  // check that the values are within 0.1% of the expected value
+  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its, 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations);
+
+// ========================================================================= //
+// ------------------- AvgIterationsRate Counters Output ------------------- //
+// ========================================================================= //
+
+void BM_Counters_kAvgIterationsRate(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+  namespace bm = benchmark;
+  state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate};
+  state.counters["bar"] =
+      bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kAvgIterations};
+}
+BENCHMARK(BM_Counters_kAvgIterationsRate);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kAvgIterationsRate "
+                           "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %float,$", MR_Next},
+                       {"\"cpu_time\": %float,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"bar\": %float,$", MR_Next},
+                       {"\"foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kAvgIterationsRate\",%csv_report,"
+                      "%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckAvgIterationsRate(Results const& e) {
+  double its = e.NumIterations();
+  double t = e.DurationCPUTime();  // this (and not real time) is the time used
+  // check that the values are within 0.1% of the expected values
+  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its / t, 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its / t, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_kAvgIterationsRate",
+                        &CheckAvgIterationsRate);
+
+// ========================================================================= //
+// --------------------------- TEST CASES END ------------------------------ //
+// ========================================================================= //
+
+int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
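
A closing observation on the flag variants exercised above: as BM_Counters_kIsIterationInvariantRate and BM_Counters_kAvgIterationsRate suggest, the *Rate enumerators are bitwise compositions of the base flags, so the two spellings used for "foo" and "bar" are interchangeable. A minimal check (editorial example, assuming the flag definitions in benchmark.h match what these tests imply):

#include "benchmark/benchmark.h"

#include <cassert>

int main() {
  namespace bm = benchmark;
  // The convenience enumerators are defined as bitwise compositions.
  assert((bm::Counter::kIsRate | bm::Counter::kIsIterationInvariant) ==
         bm::Counter::kIsIterationInvariantRate);
  assert((bm::Counter::kIsRate | bm::Counter::kAvgIterations) ==
         bm::Counter::kAvgIterationsRate);
}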