author     patrick <patrick@openbsd.org>    2019-06-17 22:18:29 +0000
committer  patrick <patrick@openbsd.org>    2019-06-17 22:18:29 +0000
commit     504b10ec5101b237e4c07e1f2de4b6c48138181e
tree       979c9ce8ab11efd05e4413305758dc5d6bc76ab4 /lib/libcxx/utils
parent     A bit more KNF no binary change
download   wireguard-openbsd-504b10ec5101b237e4c07e1f2de4b6c48138181e.tar.xz
           wireguard-openbsd-504b10ec5101b237e4c07e1f2de4b6c48138181e.zip
Import libc++ 8.0.0.
Diffstat (limited to 'lib/libcxx/utils')
-rwxr-xr-x  lib/libcxx/utils/ci/macos-backdeployment.sh  180
-rwxr-xr-x  lib/libcxx/utils/ci/macos-trunk.sh  153
-rwxr-xr-x  lib/libcxx/utils/docker/build_docker_image.sh  109
-rw-r--r--  lib/libcxx/utils/docker/debian9/Dockerfile  115
-rwxr-xr-x  lib/libcxx/utils/docker/scripts/build_gcc.sh  91
-rwxr-xr-x  lib/libcxx/utils/docker/scripts/build_install_llvm.sh  114
-rwxr-xr-x  lib/libcxx/utils/docker/scripts/checkout_git.sh  130
-rwxr-xr-x  lib/libcxx/utils/docker/scripts/docker_start_buildbots.sh  8
-rwxr-xr-x  lib/libcxx/utils/docker/scripts/install_clang_packages.sh  64
-rwxr-xr-x  lib/libcxx/utils/docker/scripts/run_buildbot.sh  62
-rw-r--r--  lib/libcxx/utils/google-benchmark/.clang-format  5
-rw-r--r--  lib/libcxx/utils/google-benchmark/.gitignore  12
-rw-r--r--  lib/libcxx/utils/google-benchmark/.travis-libcxx-setup.sh  28
-rw-r--r--  lib/libcxx/utils/google-benchmark/.travis.yml  199
-rw-r--r--  lib/libcxx/utils/google-benchmark/.ycm_extra_conf.py  115
-rw-r--r--  lib/libcxx/utils/google-benchmark/AUTHORS  1
-rw-r--r--  lib/libcxx/utils/google-benchmark/CMakeLists.txt  19
-rw-r--r--  lib/libcxx/utils/google-benchmark/CONTRIBUTORS  2
-rw-r--r--  lib/libcxx/utils/google-benchmark/README.md  170
-rw-r--r--  lib/libcxx/utils/google-benchmark/WORKSPACE  7
-rw-r--r--  lib/libcxx/utils/google-benchmark/appveyor.yml  50
-rw-r--r--  lib/libcxx/utils/google-benchmark/cmake/CXXFeatureCheck.cmake  10
-rw-r--r--  lib/libcxx/utils/google-benchmark/cmake/GetGitVersion.cmake  2
-rw-r--r--  lib/libcxx/utils/google-benchmark/cmake/HandleGTest.cmake  8
-rw-r--r--  lib/libcxx/utils/google-benchmark/docs/tools.md  99
-rw-r--r--  lib/libcxx/utils/google-benchmark/include/benchmark/benchmark.h  172
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/benchmark.cc  303
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/benchmark_api_internal.cc  15
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/benchmark_api_internal.h  11
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/benchmark_register.cc  41
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/benchmark_runner.cc  350
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/benchmark_runner.h  51
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/colorprint.cc  2
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/complexity.cc  17
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/console_reporter.cc  54
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/csv_reporter.cc  14
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/cycleclock.h  2
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/internal_macros.h  14
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/json_reporter.cc  45
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/reporter.cc  20
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/sleep.cc  2
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/statistics.cc  37
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/string_util.h  6
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/sysinfo.cc  73
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/thread_manager.h  2
-rw-r--r--  lib/libcxx/utils/google-benchmark/src/timers.cc  6
-rw-r--r--  lib/libcxx/utils/google-benchmark/test/AssemblyTests.cmake  46
-rw-r--r--  lib/libcxx/utils/google-benchmark/test/CMakeLists.txt  12
-rw-r--r--  lib/libcxx/utils/google-benchmark/test/complexity_test.cc  39
-rw-r--r--  lib/libcxx/utils/google-benchmark/test/display_aggregates_only_test.cc  43
-rw-r--r--  lib/libcxx/utils/google-benchmark/test/memory_manager_test.cc  42
-rw-r--r--  lib/libcxx/utils/google-benchmark/test/output_test.h  7
-rw-r--r--  lib/libcxx/utils/google-benchmark/test/output_test_helper.cc  88
-rw-r--r--  lib/libcxx/utils/google-benchmark/test/register_benchmark_test.cc  4
-rw-r--r--  lib/libcxx/utils/google-benchmark/test/report_aggregates_only_test.cc  39
-rw-r--r--  lib/libcxx/utils/google-benchmark/test/reporter_output_test.cc  365
-rw-r--r--  lib/libcxx/utils/google-benchmark/test/skip_with_error_test.cc  4
-rw-r--r--  lib/libcxx/utils/google-benchmark/test/string_util_gtest.cc  62
-rw-r--r--  lib/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc  118
-rw-r--r--  lib/libcxx/utils/google-benchmark/test/user_counters_test.cc  134
-rw-r--r--  lib/libcxx/utils/google-benchmark/test/user_counters_thousands_test.cc  161
-rwxr-xr-x  lib/libcxx/utils/google-benchmark/tools/compare.py  43
-rw-r--r--  lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run0.json  26
-rw-r--r--  lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run1.json  32
-rw-r--r--  lib/libcxx/utils/google-benchmark/tools/gbench/report.py  362
-rw-r--r--  lib/libcxx/utils/google-benchmark/tools/gbench/util.py  15
-rw-r--r--  lib/libcxx/utils/libcxx/test/format.py  25
-rw-r--r--  lib/libcxx/utils/libcxx/test/googlebenchmark.py  122
-rw-r--r--  lib/libcxx/utils/libcxx/test/target_info.py  36
69 files changed, 3937 insertions, 878 deletions
diff --git a/lib/libcxx/utils/ci/macos-backdeployment.sh b/lib/libcxx/utils/ci/macos-backdeployment.sh
new file mode 100755
index 00000000000..1f01fa397c9
--- /dev/null
+++ b/lib/libcxx/utils/ci/macos-backdeployment.sh
@@ -0,0 +1,180 @@
+#!/usr/bin/env bash
+
+set -ue
+
+function usage() {
+ cat <<EOM
+$(basename ${0}) [-h|--help] --libcxx-root <LIBCXX-ROOT> --libcxxabi-root <LIBCXXABI-ROOT> --std <STD> --arch <ARCHITECTURE> --deployment-target <TARGET> --sdk-version <SDK-VERSION> [--lit-args <ARGS...>]
+
+This script is used to continually test the back-deployment use case of libc++ and libc++abi on MacOS.
+
+ --libcxx-root Full path to the root of the libc++ repository to test.
+ --libcxxabi-root Full path to the root of the libc++abi repository to test.
+ --std Version of the C++ Standard to run the tests under (c++03, c++11, etc..).
+ --arch Architecture to build the tests for (32, 64).
+ --deployment-target The deployment target to run the tests for. This should be a version number of MacOS (e.g. 10.12). All MacOS versions until and including 10.7 are supported.
+ --sdk-version The version of the SDK to test with. This should be a version number of MacOS (e.g. 10.12). We'll link against the libc++ dylib in that SDK, but we'll run against the one on the given deployment target.
+ [--lit-args] Additional arguments to pass to lit (optional). If there are multiple arguments, quote them to pass them as a single argument to this script.
+ [--no-cleanup] Do not cleanup the temporary directory that was used for testing at the end. This can be useful to debug failures. Make sure to clean up manually after.
+ [-h, --help] Print this help.
+EOM
+}
+
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ --libcxx-root)
+ LIBCXX_ROOT="${2}"
+ if [[ ! -d "${LIBCXX_ROOT}" ]]; then
+ echo "--libcxx-root '${LIBCXX_ROOT}' is not a valid directory"
+ usage
+ exit 1
+ fi
+ shift; shift
+ ;;
+ --libcxxabi-root)
+ LIBCXXABI_ROOT="${2}"
+ if [[ ! -d "${LIBCXXABI_ROOT}" ]]; then
+ echo "--libcxxabi-root '${LIBCXXABI_ROOT}' is not a valid directory"
+ usage
+ exit 1
+ fi
+ shift; shift
+ ;;
+ --std)
+ STD="${2}"
+ shift; shift
+ ;;
+ --arch)
+ ARCH="${2}"
+ shift; shift
+ ;;
+ --deployment-target)
+ DEPLOYMENT_TARGET="${2}"
+ shift; shift
+ ;;
+ --sdk-version)
+ MACOS_SDK_VERSION="${2}"
+ shift; shift
+ ;;
+ --lit-args)
+ ADDITIONAL_LIT_ARGS="${2}"
+ shift; shift
+ ;;
+ --no-cleanup)
+ NO_CLEANUP=""
+ shift
+ ;;
+ -h|--help)
+ usage
+ exit 0
+ ;;
+ *)
+ echo "${1} is not a supported argument"
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+if [[ -z ${LIBCXX_ROOT+x} ]]; then echo "--libcxx-root is a required parameter"; usage; exit 1; fi
+if [[ -z ${LIBCXXABI_ROOT+x} ]]; then echo "--libcxxabi-root is a required parameter"; usage; exit 1; fi
+if [[ -z ${STD+x} ]]; then echo "--std is a required parameter"; usage; exit 1; fi
+if [[ -z ${ARCH+x} ]]; then echo "--arch is a required parameter"; usage; exit 1; fi
+if [[ -z ${DEPLOYMENT_TARGET+x} ]]; then echo "--deployment-target is a required parameter"; usage; exit 1; fi
+if [[ -z ${MACOS_SDK_VERSION+x} ]]; then echo "--sdk-version is a required parameter"; usage; exit 1; fi
+if [[ -z ${ADDITIONAL_LIT_ARGS+x} ]]; then ADDITIONAL_LIT_ARGS=""; fi
+
+
+TEMP_DIR="$(mktemp -d)"
+echo "Created temporary directory ${TEMP_DIR}"
+function cleanup {
+ if [[ -z ${NO_CLEANUP+x} ]]; then
+ echo "Removing temporary directory ${TEMP_DIR}"
+ rm -rf "${TEMP_DIR}"
+ else
+ echo "Temporary directory is at '${TEMP_DIR}', make sure to clean it up yourself"
+ fi
+}
+trap cleanup EXIT
+
+
+LLVM_ROOT="${TEMP_DIR}/llvm"
+LIBCXX_BUILD_DIR="${TEMP_DIR}/libcxx-build"
+LIBCXX_INSTALL_DIR="${TEMP_DIR}/libcxx-install"
+LIBCXXABI_BUILD_DIR="${TEMP_DIR}/libcxxabi-build"
+LIBCXXABI_INSTALL_DIR="${TEMP_DIR}/libcxxabi-install"
+
+PREVIOUS_DYLIBS_URL="http://lab.llvm.org:8080/roots/libcxx-roots.tar.gz"
+LLVM_TARBALL_URL="https://github.com/llvm-mirror/llvm/archive/master.tar.gz"
+export CC="$(xcrun --find clang)"
+export CXX="$(xcrun --find clang++)"
+
+
+echo "@@@ Downloading LLVM tarball of master (only used for CMake configuration) @@@"
+mkdir "${LLVM_ROOT}"
+curl -L "${LLVM_TARBALL_URL}" | tar -xz --strip-components=1 -C "${LLVM_ROOT}"
+echo "@@@@@@"
+
+
+echo "@@@ Configuring architecture-related stuff @@@"
+if [[ "${ARCH}" == "64" ]]; then CMAKE_ARCH_STRING="x86_64"; else CMAKE_ARCH_STRING="i386"; fi
+if [[ "${ARCH}" == "64" ]]; then LIT_ARCH_STRING=""; else LIT_ARCH_STRING="--param=enable_32bit=true"; fi
+echo "@@@@@@"
+
+
+echo "@@@ Configuring CMake for libc++ @@@"
+mkdir -p "${LIBCXX_BUILD_DIR}"
+(cd "${LIBCXX_BUILD_DIR}" &&
+ xcrun cmake "${LIBCXX_ROOT}" -GNinja \
+ -DLLVM_PATH="${LLVM_ROOT}" \
+ -DCMAKE_INSTALL_PREFIX="${LIBCXX_INSTALL_DIR}" \
+ -DCMAKE_OSX_ARCHITECTURES="${CMAKE_ARCH_STRING}"
+)
+echo "@@@@@@"
+
+
+echo "@@@ Configuring CMake for libc++abi @@@"
+mkdir -p "${LIBCXXABI_BUILD_DIR}"
+(cd "${LIBCXXABI_BUILD_DIR}" &&
+ xcrun cmake "${LIBCXXABI_ROOT}" -GNinja \
+ -DLIBCXXABI_LIBCXX_PATH="${LIBCXX_ROOT}" \
+ -DLLVM_PATH="${LLVM_ROOT}" \
+ -DCMAKE_INSTALL_PREFIX="${LIBCXXABI_INSTALL_DIR}" \
+ -DCMAKE_OSX_ARCHITECTURES="${CMAKE_ARCH_STRING}"
+)
+echo "@@@@@@"
+
+
+echo "@@@ Installing the latest libc++ headers @@@"
+ninja -C "${LIBCXX_BUILD_DIR}" install-cxx-headers
+echo "@@@@@@"
+
+
+echo "@@@ Downloading dylibs for older deployment targets @@@"
+# TODO: The tarball should contain libc++abi.dylib too, we shouldn't be relying on the system's
+# TODO: We should also link against the libc++abi.dylib that was shipped in the SDK
+PREVIOUS_DYLIBS_DIR="${TEMP_DIR}/libcxx-dylibs"
+mkdir "${PREVIOUS_DYLIBS_DIR}"
+curl "${PREVIOUS_DYLIBS_URL}" | tar -xz --strip-components=1 -C "${PREVIOUS_DYLIBS_DIR}"
+LIBCXX_ON_DEPLOYMENT_TARGET="${PREVIOUS_DYLIBS_DIR}/macOS/${DEPLOYMENT_TARGET}/libc++.dylib"
+LIBCXXABI_ON_DEPLOYMENT_TARGET="/usr/lib/libc++abi.dylib"
+LIBCXX_IN_SDK="${PREVIOUS_DYLIBS_DIR}/macOS/${MACOS_SDK_VERSION}/libc++.dylib"
+echo "@@@@@@"
+
+
+# TODO: We need to also run the tests for libc++abi.
+# TODO: Make sure lit will actually run against the libc++abi we specified
+echo "@@@ Running tests for libc++ @@@"
+"${LIBCXX_BUILD_DIR}/bin/llvm-lit" -sv "${LIBCXX_ROOT}/test" \
+ --param=enable_experimental=false \
+ --param=enable_filesystem=false \
+ ${LIT_ARCH_STRING} \
+ --param=cxx_under_test="${CXX}" \
+ --param=cxx_headers="${LIBCXX_INSTALL_DIR}/include/c++/v1" \
+ --param=std="${STD}" \
+ --param=platform="macosx${DEPLOYMENT_TARGET}" \
+ --param=cxx_runtime_root="$(dirname "${LIBCXX_ON_DEPLOYMENT_TARGET}")" \
+ --param=abi_library_path="$(dirname "${LIBCXXABI_ON_DEPLOYMENT_TARGET}")" \
+ --param=use_system_cxx_lib="$(dirname "${LIBCXX_IN_SDK}")" \
+ ${ADDITIONAL_LIT_ARGS}
+echo "@@@@@@"
diff --git a/lib/libcxx/utils/ci/macos-trunk.sh b/lib/libcxx/utils/ci/macos-trunk.sh
new file mode 100755
index 00000000000..b365cc6d8e3
--- /dev/null
+++ b/lib/libcxx/utils/ci/macos-trunk.sh
@@ -0,0 +1,153 @@
+#!/usr/bin/env bash
+
+set -ue
+
+function usage() {
+ cat <<EOM
+$(basename ${0}) [-h|--help] --libcxx-root <LIBCXX-ROOT> --libcxxabi-root <LIBCXXABI-ROOT> --std <STD> --arch <ARCHITECTURE> [--lit-args <ARGS...>]
+
+This script is used to continually test libc++ and libc++abi trunk on MacOS.
+
+ --libcxx-root Full path to the root of the libc++ repository to test.
+ --libcxxabi-root Full path to the root of the libc++abi repository to test.
+ --std Version of the C++ Standard to run the tests under (c++03, c++11, etc..).
+ --arch Architecture to build the tests for (32, 64).
+ [--lit-args] Additional arguments to pass to lit (optional). If there are multiple arguments, quote them to pass them as a single argument to this script.
+ [--no-cleanup] Do not cleanup the temporary directory that was used for testing at the end. This can be useful to debug failures. Make sure to clean up manually after.
+ [-h, --help] Print this help.
+EOM
+}
+
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ --libcxx-root)
+ LIBCXX_ROOT="${2}"
+ if [[ ! -e "${LIBCXX_ROOT}" ]]; then
+ echo "--libcxx-root '${LIBCXX_ROOT}' is not a valid directory"
+ usage
+ exit 1
+ fi
+ shift; shift
+ ;;
+ --libcxxabi-root)
+ LIBCXXABI_ROOT="${2}"
+ if [[ ! -e "${LIBCXXABI_ROOT}" ]]; then
+ echo "--libcxxabi-root '${LIBCXXABI_ROOT}' is not a valid directory"
+ usage
+ exit 1
+ fi
+ shift; shift
+ ;;
+ --std)
+ STD="${2}"
+ shift; shift
+ ;;
+ --arch)
+ ARCH="${2}"
+ shift; shift
+ ;;
+ --lit-args)
+ ADDITIONAL_LIT_ARGS="${2}"
+ shift; shift
+ ;;
+ --no-cleanup)
+ NO_CLEANUP=""
+ shift
+ ;;
+ -h|--help)
+ usage
+ exit 0
+ ;;
+ *)
+ echo "${1} is not a supported argument"
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+if [[ -z ${LIBCXX_ROOT+x} ]]; then echo "--libcxx-root is a required parameter"; usage; exit 1; fi
+if [[ -z ${LIBCXXABI_ROOT+x} ]]; then echo "--libcxxabi-root is a required parameter"; usage; exit 1; fi
+if [[ -z ${STD+x} ]]; then echo "--std is a required parameter"; usage; exit 1; fi
+if [[ -z ${ARCH+x} ]]; then echo "--arch is a required parameter"; usage; exit 1; fi
+if [[ -z ${ADDITIONAL_LIT_ARGS+x} ]]; then ADDITIONAL_LIT_ARGS=""; fi
+
+
+TEMP_DIR="$(mktemp -d)"
+echo "Created temporary directory ${TEMP_DIR}"
+function cleanup {
+ if [[ -z ${NO_CLEANUP+x} ]]; then
+ echo "Removing temporary directory ${TEMP_DIR}"
+ rm -rf "${TEMP_DIR}"
+ else
+ echo "Temporary directory is at '${TEMP_DIR}', make sure to clean it up yourself"
+ fi
+}
+trap cleanup EXIT
+
+
+LLVM_ROOT="${TEMP_DIR}/llvm"
+LIBCXX_BUILD_DIR="${TEMP_DIR}/libcxx-build"
+LIBCXX_INSTALL_DIR="${TEMP_DIR}/libcxx-install"
+LIBCXXABI_BUILD_DIR="${TEMP_DIR}/libcxxabi-build"
+LIBCXXABI_INSTALL_DIR="${TEMP_DIR}/libcxxabi-install"
+
+LLVM_TARBALL_URL="https://github.com/llvm-mirror/llvm/archive/master.tar.gz"
+export CC="$(xcrun --find clang)"
+export CXX="$(xcrun --find clang++)"
+
+
+echo "@@@ Downloading LLVM tarball of master (only used for CMake configuration) @@@"
+mkdir "${LLVM_ROOT}"
+curl -L "${LLVM_TARBALL_URL}" | tar -xz --strip-components=1 -C "${LLVM_ROOT}"
+echo "@@@@@@"
+
+
+echo "@@@ Setting up LIT flags @@@"
+LIT_FLAGS="-sv --param=std=${STD} ${ADDITIONAL_LIT_ARGS}"
+if [[ "${ARCH}" == "32" ]]; then
+ LIT_FLAGS+=" --param=enable_32bit=true"
+fi
+echo "@@@@@@"
+
+
+echo "@@@ Configuring CMake for libc++ @@@"
+mkdir -p "${LIBCXX_BUILD_DIR}"
+(cd "${LIBCXX_BUILD_DIR}" &&
+ xcrun cmake "${LIBCXX_ROOT}" -GNinja \
+ -DLLVM_PATH="${LLVM_ROOT}" \
+ -DCMAKE_INSTALL_PREFIX="${LIBCXX_INSTALL_DIR}" \
+ -DLLVM_LIT_ARGS="${LIT_FLAGS}" \
+ -DCMAKE_OSX_ARCHITECTURES="i386;x86_64" # Build a universal dylib
+)
+echo "@@@@@@"
+
+
+echo "@@@ Configuring CMake for libc++abi @@@"
+mkdir -p "${LIBCXXABI_BUILD_DIR}"
+(cd "${LIBCXXABI_BUILD_DIR}" &&
+ xcrun cmake "${LIBCXXABI_ROOT}" -GNinja \
+ -DLIBCXXABI_LIBCXX_PATH="${LIBCXX_ROOT}" \
+ -DLLVM_PATH="${LLVM_ROOT}" \
+ -DCMAKE_INSTALL_PREFIX="${LIBCXXABI_INSTALL_DIR}" \
+ -DLLVM_LIT_ARGS="${LIT_FLAGS}" \
+ -DCMAKE_OSX_ARCHITECTURES="i386;x86_64" # Build a universal dylib
+)
+echo "@@@@@@"
+
+
+echo "@@@ Building libc++.dylib and libc++abi.dylib from sources (just to make sure it works) @@@"
+ninja -C "${LIBCXX_BUILD_DIR}" install-cxx
+ninja -C "${LIBCXXABI_BUILD_DIR}" install-cxxabi
+echo "@@@@@@"
+
+
+echo "@@@ Running tests for libc++ @@@"
+# TODO: We should run check-cxx-abilist too
+ninja -C "${LIBCXX_BUILD_DIR}" check-cxx
+echo "@@@@@@"
+
+
+echo "@@@ Running tests for libc++abi @@@"
+ninja -C "${LIBCXXABI_BUILD_DIR}" check-cxxabi
+echo "@@@@@@"
diff --git a/lib/libcxx/utils/docker/build_docker_image.sh b/lib/libcxx/utils/docker/build_docker_image.sh
new file mode 100755
index 00000000000..0d2d6d313c7
--- /dev/null
+++ b/lib/libcxx/utils/docker/build_docker_image.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+#===- libcxx/utils/docker/build_docker_image.sh ----------------------------===//
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+#===----------------------------------------------------------------------===//
+set -e
+
+IMAGE_SOURCE=""
+DOCKER_REPOSITORY=""
+DOCKER_TAG=""
+
+function show_usage() {
+ cat << EOF
+Usage: build_docker_image.sh [options] [-- [cmake_args]...]
+
+Available options:
+ General:
+ -h|--help show this help message
+ Docker-specific:
+ -s|--source image source dir (i.e. debian8, nvidia-cuda, etc)
+ -d|--docker-repository docker repository for the image
+ -t|--docker-tag docker tag for the image
+
+Required options: --source and --docker-repository.
+
+For example, running:
+$ build_docker_image.sh -s debian9 -d mydocker/debian9-clang -t latest
+will produce two docker images:
+ mydocker/debian9-clang-build:latest - an intermediate image used to compile
+ clang.
+ mydocker/clang-debian9:latest - a small image with preinstalled clang.
+Please note that this example produces a not very useful installation, since it
+doesn't override CMake defaults, which produces a Debug and non-bootstrapped
+version of clang.
+EOF
+}
+
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ -h|--help)
+ show_usage
+ exit 0
+ ;;
+ -s|--source)
+ shift
+ IMAGE_SOURCE="$1"
+ shift
+ ;;
+ -d|--docker-repository)
+ shift
+ DOCKER_REPOSITORY="$1"
+ shift
+ ;;
+ -t|--docker-tag)
+ shift
+ DOCKER_TAG="$1"
+ shift
+ ;;
+ *)
+ echo "Unknown argument $1"
+ exit 1
+ ;;
+ esac
+done
+
+
+command -v docker >/dev/null ||
+ {
+ echo "Docker binary cannot be found. Please install Docker to use this script."
+ exit 1
+ }
+
+if [ "$IMAGE_SOURCE" == "" ]; then
+ echo "Required argument missing: --source"
+ exit 1
+fi
+
+if [ "$DOCKER_REPOSITORY" == "" ]; then
+ echo "Required argument missing: --docker-repository"
+ exit 1
+fi
+
+SOURCE_DIR=$(dirname $0)
+if [ ! -d "$SOURCE_DIR/$IMAGE_SOURCE" ]; then
+ echo "No sources for '$IMAGE_SOURCE' were found in $SOURCE_DIR"
+ exit 1
+fi
+
+BUILD_DIR=$(mktemp -d)
+trap "rm -rf $BUILD_DIR" EXIT
+echo "Using a temporary directory for the build: $BUILD_DIR"
+
+cp -r "$SOURCE_DIR/$IMAGE_SOURCE" "$BUILD_DIR/$IMAGE_SOURCE"
+cp -r "$SOURCE_DIR/scripts" "$BUILD_DIR/scripts"
+
+
+if [ "$DOCKER_TAG" != "" ]; then
+ DOCKER_TAG=":$DOCKER_TAG"
+fi
+
+echo "Building ${DOCKER_REPOSITORY}${DOCKER_TAG} from $IMAGE_SOURCE"
+docker build -t "${DOCKER_REPOSITORY}${DOCKER_TAG}" \
+ -f "$BUILD_DIR/$IMAGE_SOURCE/Dockerfile" \
+ "$BUILD_DIR"
+echo "Done"
diff --git a/lib/libcxx/utils/docker/debian9/Dockerfile b/lib/libcxx/utils/docker/debian9/Dockerfile
new file mode 100644
index 00000000000..8dc43f40105
--- /dev/null
+++ b/lib/libcxx/utils/docker/debian9/Dockerfile
@@ -0,0 +1,115 @@
+#===- libcxx/utils/docker/debian9/Dockerfile -------------------------===//
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+#===----------------------------------------------------------------------===//
+
+# Setup the base builder image with the packages we'll need to build GCC and Clang from source.
+FROM launcher.gcr.io/google/debian9:latest as builder-base
+LABEL maintainer "libc++ Developers"
+
+RUN apt-get update && \
+ apt-get install -y --no-install-recommends \
+ ca-certificates \
+ gnupg \
+ build-essential \
+ wget \
+ subversion \
+ unzip \
+ automake \
+ python \
+ cmake \
+ ninja-build \
+ curl \
+ git \
+ gcc-multilib \
+ g++-multilib \
+ libc6-dev \
+ bison \
+ flex \
+ libtool \
+ autoconf \
+ binutils-dev \
+ binutils-gold \
+ software-properties-common && \
+ update-alternatives --install "/usr/bin/ld" "ld" "/usr/bin/ld.gold" 20 && \
+ update-alternatives --install "/usr/bin/ld" "ld" "/usr/bin/ld.bfd" 10
+
+# Build GCC 4.9 for testing our C++11 against
+FROM builder-base as gcc-49-builder
+LABEL maintainer "libc++ Developers"
+
+ADD scripts/build_gcc.sh /tmp/build_gcc.sh
+
+RUN git clone --depth=1 --branch gcc-4_9_4-release git://gcc.gnu.org/git/gcc.git /tmp/gcc-4.9.4
+RUN cd /tmp/gcc-4.9.4/ && ./contrib/download_prerequisites
+RUN /tmp/build_gcc.sh --source /tmp/gcc-4.9.4 --to /opt/gcc-4.9.4
+
+# Build GCC ToT for testing in all dialects.
+FROM builder-base as gcc-tot-builder
+LABEL maintainer "libc++ Developers"
+
+ADD scripts/build_gcc.sh /tmp/build_gcc.sh
+
+RUN git clone --depth=1 git://gcc.gnu.org/git/gcc.git /tmp/gcc-tot
+RUN cd /tmp/gcc-tot && ./contrib/download_prerequisites
+RUN /tmp/build_gcc.sh --source /tmp/gcc-tot --to /opt/gcc-tot
+
+# Build LLVM 4.0 which is used to test against a "legacy" compiler.
+FROM builder-base as llvm-4-builder
+LABEL maintainer "libc++ Developers"
+
+ADD scripts/checkout_git.sh /tmp/checkout_git.sh
+ADD scripts/build_install_llvm.sh /tmp/build_install_llvm.sh
+
+RUN /tmp/checkout_git.sh --to /tmp/llvm-4.0 -p clang -p compiler-rt --branch release_40
+RUN /tmp/build_install_llvm.sh \
+ --install /opt/llvm-4.0 \
+ --source /tmp/llvm-4.0 \
+ --build /tmp/build-llvm-4.0 \
+ -i install-clang -i install-clang-headers \
+ -i install-compiler-rt \
+ -- \
+ -DCMAKE_BUILD_TYPE=RELEASE \
+ -DLLVM_ENABLE_ASSERTIONS=ON
+
+# Stage 2. Produce a minimal release image with build results.
+FROM launcher.gcr.io/google/debian9:latest
+LABEL maintainer "libc++ Developers"
+
+# Copy over the GCC and Clang installations
+COPY --from=gcc-49-builder /opt/gcc-4.9.4 /opt/gcc-4.9.4
+COPY --from=gcc-tot-builder /opt/gcc-tot /opt/gcc-tot
+COPY --from=llvm-4-builder /opt/llvm-4.0 /opt/llvm-4.0
+
+RUN ln -s /opt/gcc-4.9.4/bin/gcc /usr/local/bin/gcc-4.9 && \
+ ln -s /opt/gcc-4.9.4/bin/g++ /usr/local/bin/g++-4.9
+
+RUN apt-get update && \
+ apt-get install -y \
+ ca-certificates \
+ gnupg \
+ build-essential \
+ apt-transport-https \
+ curl \
+ software-properties-common
+
+RUN apt-get install -y --no-install-recommends \
+ systemd \
+ sysvinit-utils \
+ cmake \
+ subversion \
+ git \
+ ninja-build \
+ gcc-multilib \
+ g++-multilib \
+ python \
+ buildbot-slave
+
+ADD scripts/install_clang_packages.sh /tmp/install_clang_packages.sh
+RUN /tmp/install_clang_packages.sh && rm /tmp/install_clang_packages.sh
+
+RUN git clone https://git.llvm.org/git/libcxx.git /libcxx
diff --git a/lib/libcxx/utils/docker/scripts/build_gcc.sh b/lib/libcxx/utils/docker/scripts/build_gcc.sh
new file mode 100755
index 00000000000..85feb16acd6
--- /dev/null
+++ b/lib/libcxx/utils/docker/scripts/build_gcc.sh
@@ -0,0 +1,91 @@
+#!/usr/bin/env bash
+#===- libcxx/utils/docker/scripts/build-gcc.sh ----------------------------===//
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+#===-----------------------------------------------------------------------===//
+
+set -e
+
+
+function show_usage() {
+ cat << EOF
+Usage: build-gcc.sh [options]
+
+Run autoconf with the specified arguments. Used inside docker container.
+
+Available options:
+ -h|--help show this help message
+ --source the source path from which to run the configuration.
+ --to destination directory where to install the targets.
+Required options: --to and --source.
+
+The configure and make invocations used by this script are fixed; extra arguments are not forwarded.
+EOF
+}
+
+GCC_INSTALL_DIR=""
+GCC_SOURCE_DIR=""
+
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ --to)
+ shift
+ GCC_INSTALL_DIR="$1"
+ shift
+ ;;
+ --source)
+ shift
+ GCC_SOURCE_DIR="$1"
+ shift
+ ;;
+ -h|--help)
+ show_usage
+ exit 0
+ ;;
+ *)
+ echo "Unknown option: $1"
+ exit 1
+ esac
+done
+
+if [ "$GCC_INSTALL_DIR" == "" ]; then
+ echo "No install directory. Please specify the --to argument."
+ exit 1
+fi
+
+if [ "$GCC_SOURCE_DIR" == "" ]; then
+ echo "No source directory. Please specify the --source argument."
+ exit 1
+fi
+
+GCC_NAME=`basename $GCC_SOURCE_DIR`
+GCC_BUILD_DIR="/tmp/gcc-build-root/build-$GCC_NAME"
+
+mkdir -p "$GCC_INSTALL_DIR"
+mkdir -p "$GCC_BUILD_DIR"
+pushd "$GCC_BUILD_DIR"
+
+# Run the build as specified in the build arguments.
+echo "Running configuration"
+$GCC_SOURCE_DIR/configure --prefix=$GCC_INSTALL_DIR \
+ --disable-bootstrap --disable-libgomp --disable-libitm \
+ --disable-libvtv --disable-libcilkrts --disable-libmpx \
+ --disable-liboffloadmic --disable-libcc1 --enable-languages=c,c++
+
+NPROC=`nproc`
+echo "Running build with $NPROC threads"
+make -j$NPROC
+
+echo "Installing to $GCC_INSTALL_DIR"
+make install -j$NPROC
+
+popd
+
+# Cleanup.
+rm -rf "$GCC_BUILD_DIR"
+
+echo "Done" \ No newline at end of file
diff --git a/lib/libcxx/utils/docker/scripts/build_install_llvm.sh b/lib/libcxx/utils/docker/scripts/build_install_llvm.sh
new file mode 100755
index 00000000000..6f19a96a1b7
--- /dev/null
+++ b/lib/libcxx/utils/docker/scripts/build_install_llvm.sh
@@ -0,0 +1,114 @@
+#!/usr/bin/env bash
+#===- llvm/utils/docker/scripts/build_install_llvm.sh ---------------------===//
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+#===-----------------------------------------------------------------------===//
+
+set -e
+
+function show_usage() {
+ cat << EOF
+Usage: build_install_llvm.sh [options] -- [cmake-args]
+
+Run cmake with the specified arguments. Used inside docker container.
+Passes additional -DCMAKE_INSTALL_PREFIX and puts the build results into
+the directory specified by the --install option.
+
+Available options:
+ -h|--help show this help message
+ -i|--install-target name of a cmake install target to build and include in
+ the resulting archive. Can be specified multiple times.
+ --install destination directory where to install the targets.
+ --source location of the source tree.
+ --build location to use as the build directory.
+Required options: --install, --source, --build, and at least one --install-target.
+
+All options after '--' are passed to CMake invocation.
+EOF
+}
+
+CMAKE_ARGS=""
+CMAKE_INSTALL_TARGETS=""
+CLANG_INSTALL_DIR=""
+CLANG_SOURCE_DIR=""
+CLANG_BUILD_DIR=""
+
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ -i|--install-target)
+ shift
+ CMAKE_INSTALL_TARGETS="$CMAKE_INSTALL_TARGETS $1"
+ shift
+ ;;
+ --source)
+ shift
+ CLANG_SOURCE_DIR="$1"
+ shift
+ ;;
+ --build)
+ shift
+ CLANG_BUILD_DIR="$1"
+ shift
+ ;;
+ --install)
+ shift
+ CLANG_INSTALL_DIR="$1"
+ shift
+ ;;
+ --)
+ shift
+ CMAKE_ARGS="$*"
+ shift $#
+ ;;
+ -h|--help)
+ show_usage
+ exit 0
+ ;;
+ *)
+ echo "Unknown option: $1"
+ exit 1
+ esac
+done
+
+if [ "$CLANG_SOURCE_DIR" == "" ]; then
+ echo "No source directory. Please pass --source."
+ exit 1
+fi
+
+if [ "$CLANG_BUILD_DIR" == "" ]; then
+ echo "No build directory. Please pass --build"
+ exit 1
+fi
+
+if [ "$CMAKE_INSTALL_TARGETS" == "" ]; then
+ echo "No install targets. Please pass one or more --install-target."
+ exit 1
+fi
+
+if [ "$CLANG_INSTALL_DIR" == "" ]; then
+ echo "No install directory. Please specify the --to argument."
+ exit 1
+fi
+
+echo "Building in $CLANG_BUILD_DIR"
+mkdir -p "$CLANG_BUILD_DIR"
+pushd "$CLANG_BUILD_DIR"
+
+# Run the build as specified in the build arguments.
+echo "Running build"
+cmake -GNinja \
+ -DCMAKE_INSTALL_PREFIX="$CLANG_INSTALL_DIR" \
+ $CMAKE_ARGS \
+ "$CLANG_SOURCE_DIR"
+ninja $CMAKE_INSTALL_TARGETS
+
+popd
+
+# Cleanup.
+rm -rf "$CLANG_BUILD_DIR"
+
+echo "Done"
diff --git a/lib/libcxx/utils/docker/scripts/checkout_git.sh b/lib/libcxx/utils/docker/scripts/checkout_git.sh
new file mode 100755
index 00000000000..222700229c5
--- /dev/null
+++ b/lib/libcxx/utils/docker/scripts/checkout_git.sh
@@ -0,0 +1,130 @@
+#!/usr/bin/env bash
+#===- llvm/utils/docker/scripts/checkout.sh ---------------------===//
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+#===-----------------------------------------------------------------------===//
+
+set -e
+
+function show_usage() {
+ cat << EOF
+Usage: checkout.sh [options]
+
+Checkout git sources into the directory given by --to. Used inside a docker container.
+
+Available options:
+ -h|--help show this help message
+  -b|--branch        git branch to checkout, e.g. 'master',
+                     'release_40'
+                     (default: 'master')
+  -p|--llvm-project  name of an LLVM git project to checkout.
+ For clang, please use 'clang', not 'cfe'.
+ Project 'llvm' is always included and ignored, if
+ specified.
+ Can be specified multiple times.
+EOF
+}
+
+LLVM_BRANCH=""
+# We always checkout llvm
+LLVM_PROJECTS="llvm"
+SOURCE_DIR=""
+
+function contains_project() {
+ local TARGET_PROJ="$1"
+ local PROJ
+ for PROJ in $LLVM_PROJECTS; do
+ if [ "$PROJ" == "$TARGET_PROJ" ]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ --to)
+ shift
+ SOURCE_DIR="$1"
+ shift
+ ;;
+ -b|--branch)
+ shift
+ LLVM_BRANCH="$1"
+ shift
+ ;;
+ -p|--llvm-project)
+ shift
+ PROJ="$1"
+ shift
+
+ if [ "$PROJ" == "cfe" ]; then
+ PROJ="clang"
+ fi
+
+ if ! contains_project "$PROJ" ; then
+ if [ "$PROJ" == "clang-tools-extra" ] && [ ! contains_project "clang" ]; then
+ echo "Project 'clang-tools-extra' specified before 'clang'. Adding 'clang' to a list of projects first."
+ LLVM_PROJECTS="$LLVM_PROJECTS clang"
+ fi
+ LLVM_PROJECTS="$LLVM_PROJECTS $PROJ"
+ else
+ echo "Project '$PROJ' is already enabled, ignoring extra occurrences."
+ fi
+ ;;
+ -h|--help)
+ show_usage
+ exit 0
+ ;;
+ *)
+ echo "Unknown option: $1"
+ exit 1
+ esac
+done
+
+if [ "$SOURCE_DIR" == "" ]; then
+ echo "Must specify checkout directory using --to"
+ exit 1
+fi
+
+if [ "$LLVM_BRANCH" == "" ]; then
+ GIT_BRANCH_ARG=""
+else
+ GIT_BRANCH_ARG="--branch $LLVM_BRANCH"
+fi
+
+if [ "$LLVM_SVN_REV" != "" ]; then
+ SVN_REV_ARG="-r$LLVM_SVN_REV"
+ echo "Checking out svn revision r$LLVM_SVN_REV."
+else
+ SVN_REV_ARG=""
+ echo "Checking out latest svn revision."
+fi
+
+# Get the sources from git.
+echo "Checking out sources from git"
+
+for LLVM_PROJECT in $LLVM_PROJECTS; do
+ if [ "$LLVM_PROJECT" == "llvm" ]; then
+ CHECKOUT_DIR="$SOURCE_DIR"
+ elif [ "$LLVM_PROJECT" == "libcxx" ] || [ "$LLVM_PROJECT" == "libcxxabi" ] || [ "$LLVM_PROJECT" == "compiler-rt" ]; then
+ CHECKOUT_DIR="$SOURCE_DIR/projects/$LLVM_PROJECT"
+ elif [ "$LLVM_PROJECT" == "clang" ]; then
+ CHECKOUT_DIR="$SOURCE_DIR/tools/clang"
+ elif [ "$LLVM_PROJECT" == "clang-tools-extra" ]; then
+ CHECKOUT_DIR="$SOURCE_DIR/tools/clang/tools/extra"
+ else
+ CHECKOUT_DIR="$SOURCE_DIR/$LLVM_PROJECT"
+ fi
+
+ echo "Checking out https://git.llvm.org/git/$LLVM_PROJECT to $CHECKOUT_DIR"
+ git clone --depth=1 $GIT_BRANCH_ARG \
+ "https://git.llvm.org/git/$LLVM_PROJECT.git" \
+ "$CHECKOUT_DIR"
+done
+
+echo "Done"
diff --git a/lib/libcxx/utils/docker/scripts/docker_start_buildbots.sh b/lib/libcxx/utils/docker/scripts/docker_start_buildbots.sh
new file mode 100755
index 00000000000..f47ddcd2481
--- /dev/null
+++ b/lib/libcxx/utils/docker/scripts/docker_start_buildbots.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+set -x
+
+# Update the libc++ sources in the image in order to use the most recent version of
+# run_buildbot.sh
+cd /libcxx
+git pull
+/libcxx/utils/docker/scripts/run_buildbot.sh "$@"
diff --git a/lib/libcxx/utils/docker/scripts/install_clang_packages.sh b/lib/libcxx/utils/docker/scripts/install_clang_packages.sh
new file mode 100755
index 00000000000..fabee0e8147
--- /dev/null
+++ b/lib/libcxx/utils/docker/scripts/install_clang_packages.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+#===- libcxx/utils/docker/scripts/install_clang_package.sh -----------------===//
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+#===-----------------------------------------------------------------------===//
+
+set -e
+
+function show_usage() {
+ cat << EOF
+Usage: install_clang_package.sh [options]
+
+Install the latest clang, libc++ and libc++abi packages from apt.llvm.org.
+Available options:
+ -h|--help show this help message
+ --version the numeric version of the package to use.
+EOF
+}
+
+VERSION=""
+
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ --version)
+ shift
+ VERSION="$1"
+ shift
+ ;;
+ -h|--help)
+ show_usage
+ exit 0
+ ;;
+ *)
+ echo "Unknown option: $1"
+ exit 1
+ esac
+done
+
+
+
+curl -fsSL https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -
+add-apt-repository -s "deb http://apt.llvm.org/$(lsb_release -cs)/ llvm-toolchain-$(lsb_release -cs) main"
+apt-get update
+apt-get install -y --no-install-recommends clang
+
+echo "Testing clang version..."
+clang --version
+
+echo "Testing clang++ version..."
+clang++ --version
+
+# Figure out the libc++ and libc++abi package versions that we want.
+if [ "$VERSION" == "" ]; then
+ VERSION="$(apt-cache search 'libc\+\+-[0-9]-dev' | awk '{print $1}' | awk -F- '{print $2}')"
+ echo "Installing version '$VERSION'"
+fi
+
+apt-get install -y --no-install-recommends "libc++-$VERSION-dev" "libc++abi-$VERSION-dev"
+
+echo "Done"
diff --git a/lib/libcxx/utils/docker/scripts/run_buildbot.sh b/lib/libcxx/utils/docker/scripts/run_buildbot.sh
new file mode 100755
index 00000000000..45f5a1cf6bf
--- /dev/null
+++ b/lib/libcxx/utils/docker/scripts/run_buildbot.sh
@@ -0,0 +1,62 @@
+#!/usr/bin/env bash
+set -x
+
+BOT_DIR=/b
+BOT_NAME=$1
+BOT_PASS=$2
+
+mkdir -p $BOT_DIR
+
+#curl "https://repo.stackdriver.com/stack-install.sh" | bash -s -- --write-gcm
+
+apt-get update -y
+apt-get upgrade -y
+
+# FIXME(EricWF): Remove this hack. It's only in place to temporarily fix linking libclang_rt from the
+# debian packages.
+# WARNING: If you're not a buildbot, DO NOT RUN!
+apt-get install lld-8
+rm /usr/bin/ld
+ln -s /usr/bin/lld-8 /usr/bin/ld
+
+systemctl set-property buildslave.service TasksMax=100000
+
+buildslave stop $BOT_DIR
+
+chown buildbot:buildbot $BOT_DIR
+
+echo "Connecting as $BOT_NAME"
+buildslave create-slave --allow-shutdown=signal $BOT_DIR lab.llvm.org:9990 $BOT_NAME $BOT_PASS
+
+echo "Eric Fiselier <ericwf@google.com>" > $BOT_DIR/info/admin
+
+{
+ uname -a | head -n1
+ cmake --version | head -n1
+ g++ --version | head -n1
+ ld --version | head -n1
+ date
+ lscpu
+} > $BOT_DIR/info/host
+
+echo "SLAVE_RUNNER=/usr/bin/buildslave
+SLAVE_ENABLED[1]=\"1\"
+SLAVE_NAME[1]=\"buildslave1\"
+SLAVE_USER[1]=\"buildbot\"
+SLAVE_BASEDIR[1]=\"$BOT_DIR\"
+SLAVE_OPTIONS[1]=\"\"
+SLAVE_PREFIXCMD[1]=\"\"" > /etc/default/buildslave
+
+chown -R buildbot:buildbot $BOT_DIR
+systemctl daemon-reload
+service buildslave restart
+
+sleep 30
+cat $BOT_DIR/twistd.log
+grep "slave is ready" $BOT_DIR/twistd.log || shutdown now
+
+# GCE can restart instance after 24h in the middle of the build.
+# Gracefully restart before that happen.
+sleep 72000
+while pkill -SIGHUP buildslave; do sleep 5; done;
+shutdown now
\ No newline at end of file
diff --git a/lib/libcxx/utils/google-benchmark/.clang-format b/lib/libcxx/utils/google-benchmark/.clang-format
new file mode 100644
index 00000000000..4b3f13fa55e
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/.clang-format
@@ -0,0 +1,5 @@
+---
+Language: Cpp
+BasedOnStyle: Google
+...
+
diff --git a/lib/libcxx/utils/google-benchmark/.gitignore b/lib/libcxx/utils/google-benchmark/.gitignore
index 3c1b4f2183e..8c30e28f53a 100644
--- a/lib/libcxx/utils/google-benchmark/.gitignore
+++ b/lib/libcxx/utils/google-benchmark/.gitignore
@@ -6,6 +6,7 @@
*.dylib
*.cmake
!/cmake/*.cmake
+!/test/AssemblyTests.cmake
*~
*.pyc
__pycache__
@@ -41,6 +42,17 @@ build.ninja
install_manifest.txt
rules.ninja
+# bazel output symlinks.
+bazel-*
+
# out-of-source build top-level folders.
build/
_build/
+build*/
+
+# in-source dependencies
+/googletest/
+
+# Visual Studio 2015/2017 cache/options directory
+.vs/
+CMakeSettings.json
diff --git a/lib/libcxx/utils/google-benchmark/.travis-libcxx-setup.sh b/lib/libcxx/utils/google-benchmark/.travis-libcxx-setup.sh
new file mode 100644
index 00000000000..a591743c6a6
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/.travis-libcxx-setup.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# Install a newer CMake version
+curl -sSL https://cmake.org/files/v3.6/cmake-3.6.1-Linux-x86_64.sh -o install-cmake.sh
+chmod +x install-cmake.sh
+sudo ./install-cmake.sh --prefix=/usr/local --skip-license
+
+# Checkout LLVM sources
+git clone --depth=1 https://github.com/llvm-mirror/llvm.git llvm-source
+git clone --depth=1 https://github.com/llvm-mirror/libcxx.git llvm-source/projects/libcxx
+git clone --depth=1 https://github.com/llvm-mirror/libcxxabi.git llvm-source/projects/libcxxabi
+
+# Setup libc++ options
+if [ -z "$BUILD_32_BITS" ]; then
+ export BUILD_32_BITS=OFF && echo disabling 32 bit build
+fi
+
+# Build and install libc++ (Use unstable ABI for better sanitizer coverage)
+mkdir llvm-build && cd llvm-build
+cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} \
+ -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/usr \
+ -DLIBCXX_ABI_UNSTABLE=ON \
+ -DLLVM_USE_SANITIZER=${LIBCXX_SANITIZER} \
+ -DLLVM_BUILD_32_BITS=${BUILD_32_BITS} \
+ ../llvm-source
+make cxx -j2
+sudo make install-cxxabi install-cxx
+cd ../
diff --git a/lib/libcxx/utils/google-benchmark/.travis.yml b/lib/libcxx/utils/google-benchmark/.travis.yml
new file mode 100644
index 00000000000..4625dfb0878
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/.travis.yml
@@ -0,0 +1,199 @@
+sudo: required
+dist: trusty
+language: cpp
+
+env:
+ global:
+ - /usr/local/bin:$PATH
+
+matrix:
+ include:
+ - compiler: gcc
+ addons:
+ apt:
+ packages:
+ - lcov
+ env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Coverage
+ - compiler: gcc
+ env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug
+ - compiler: gcc
+ env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release
+ - compiler: gcc
+ addons:
+ apt:
+ packages:
+ - g++-multilib
+ env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug BUILD_32_BITS=ON
+ - compiler: gcc
+ addons:
+ apt:
+ packages:
+ - g++-multilib
+ env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release BUILD_32_BITS=ON
+ - compiler: gcc
+ env:
+ - INSTALL_GCC6_FROM_PPA=1
+ - COMPILER=g++-6 C_COMPILER=gcc-6 BUILD_TYPE=Debug
+ - ENABLE_SANITIZER=1
+ - EXTRA_FLAGS="-fno-omit-frame-pointer -g -O2 -fsanitize=undefined,address -fuse-ld=gold"
+ - compiler: clang
+ env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Debug
+ - compiler: clang
+ env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Release
+ # Clang w/ libc++
+ - compiler: clang
+ addons:
+ apt:
+ packages:
+ clang-3.8
+ env:
+ - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
+ - LIBCXX_BUILD=1
+ - EXTRA_FLAGS="-stdlib=libc++"
+ - compiler: clang
+ addons:
+ apt:
+ packages:
+ clang-3.8
+ env:
+ - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release
+ - LIBCXX_BUILD=1
+ - EXTRA_FLAGS="-stdlib=libc++"
+ # Clang w/ 32bit libc++
+ - compiler: clang
+ addons:
+ apt:
+ packages:
+ - clang-3.8
+ - g++-multilib
+ env:
+ - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
+ - LIBCXX_BUILD=1
+ - BUILD_32_BITS=ON
+ - EXTRA_FLAGS="-stdlib=libc++ -m32"
+ # Clang w/ 32bit libc++
+ - compiler: clang
+ addons:
+ apt:
+ packages:
+ - clang-3.8
+ - g++-multilib
+ env:
+ - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release
+ - LIBCXX_BUILD=1
+ - BUILD_32_BITS=ON
+ - EXTRA_FLAGS="-stdlib=libc++ -m32"
+ # Clang w/ libc++, ASAN, UBSAN
+ - compiler: clang
+ addons:
+ apt:
+ packages:
+ clang-3.8
+ env:
+ - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
+ - LIBCXX_BUILD=1 LIBCXX_SANITIZER="Undefined;Address"
+ - ENABLE_SANITIZER=1
+ - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=undefined,address -fno-sanitize-recover=all"
+ - UBSAN_OPTIONS=print_stacktrace=1
+ # Clang w/ libc++ and MSAN
+ - compiler: clang
+ addons:
+ apt:
+ packages:
+ clang-3.8
+ env:
+ - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
+ - LIBCXX_BUILD=1 LIBCXX_SANITIZER=MemoryWithOrigins
+ - ENABLE_SANITIZER=1
+ - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=memory -fsanitize-memory-track-origins"
+ # Clang w/ libc++ and MSAN
+ - compiler: clang
+ addons:
+ apt:
+ packages:
+ clang-3.8
+ env:
+ - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=RelWithDebInfo
+ - LIBCXX_BUILD=1 LIBCXX_SANITIZER=Thread
+ - ENABLE_SANITIZER=1
+ - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=thread -fno-sanitize-recover=all"
+ - os: osx
+ osx_image: xcode8.3
+ compiler: clang
+ env:
+ - COMPILER=clang++ BUILD_TYPE=Debug
+ - os: osx
+ osx_image: xcode8.3
+ compiler: clang
+ env:
+ - COMPILER=clang++ BUILD_TYPE=Release
+ - os: osx
+ osx_image: xcode8.3
+ compiler: clang
+ env:
+ - COMPILER=clang++ BUILD_TYPE=Release BUILD_32_BITS=ON
+ - os: osx
+ osx_image: xcode8.3
+ compiler: gcc
+ env:
+ - COMPILER=g++-7 C_COMPILER=gcc-7 BUILD_TYPE=Debug
+
+before_script:
+ - if [ -n "${LIBCXX_BUILD}" ]; then
+ source .travis-libcxx-setup.sh;
+ fi
+ - if [ -n "${ENABLE_SANITIZER}" ]; then
+ export EXTRA_OPTIONS="-DBENCHMARK_ENABLE_ASSEMBLY_TESTS=OFF";
+ else
+ export EXTRA_OPTIONS="";
+ fi
+ - mkdir -p build && cd build
+
+before_install:
+ - if [ -z "$BUILD_32_BITS" ]; then
+ export BUILD_32_BITS=OFF && echo disabling 32 bit build;
+ fi
+ - if [ -n "${INSTALL_GCC6_FROM_PPA}" ]; then
+ sudo add-apt-repository -y "ppa:ubuntu-toolchain-r/test";
+ sudo apt-get update --option Acquire::Retries=100 --option Acquire::http::Timeout="60";
+ fi
+
+install:
+ - if [ -n "${INSTALL_GCC6_FROM_PPA}" ]; then
+ travis_wait sudo -E apt-get -yq --no-install-suggests --no-install-recommends install g++-6;
+ fi
+ - if [ "${TRAVIS_OS_NAME}" == "linux" -a "${BUILD_32_BITS}" == "OFF" ]; then
+ travis_wait sudo -E apt-get -y --no-install-suggests --no-install-recommends install llvm-3.9-tools;
+ sudo cp /usr/lib/llvm-3.9/bin/FileCheck /usr/local/bin/;
+ fi
+ - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then
+ PATH=~/.local/bin:${PATH};
+ pip install --user --upgrade pip;
+ travis_wait pip install --user cpp-coveralls;
+ fi
+ - if [ "${C_COMPILER}" == "gcc-7" -a "${TRAVIS_OS_NAME}" == "osx" ]; then
+ rm -f /usr/local/include/c++;
+ brew update;
+ travis_wait brew install gcc@7;
+ fi
+ - if [ "${TRAVIS_OS_NAME}" == "linux" ]; then
+ sudo apt-get update -qq;
+ sudo apt-get install -qq unzip;
+ wget https://github.com/bazelbuild/bazel/releases/download/0.10.1/bazel-0.10.1-installer-linux-x86_64.sh --output-document bazel-installer.sh;
+ travis_wait sudo bash bazel-installer.sh;
+ fi
+ - if [ "${TRAVIS_OS_NAME}" == "osx" ]; then
+ curl -L -o bazel-installer.sh https://github.com/bazelbuild/bazel/releases/download/0.10.1/bazel-0.10.1-installer-darwin-x86_64.sh;
+ travis_wait sudo bash bazel-installer.sh;
+ fi
+
+script:
+ - cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_CXX_FLAGS="${EXTRA_FLAGS}" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON -DBENCHMARK_BUILD_32_BITS=${BUILD_32_BITS} ${EXTRA_OPTIONS} ..
+ - make
+ - ctest -C ${BUILD_TYPE} --output-on-failure
+ - bazel test -c dbg --define google_benchmark.have_regex=posix --announce_rc --verbose_failures --test_output=errors --keep_going //test/...
+
+after_success:
+ - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then
+ coveralls --include src --include include --gcov-options '\-lp' --root .. --build-root .;
+ fi
diff --git a/lib/libcxx/utils/google-benchmark/.ycm_extra_conf.py b/lib/libcxx/utils/google-benchmark/.ycm_extra_conf.py
new file mode 100644
index 00000000000..5649ddcc749
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/.ycm_extra_conf.py
@@ -0,0 +1,115 @@
+import os
+import ycm_core
+
+# These are the compilation flags that will be used in case there's no
+# compilation database set (by default, one is not set).
+# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
+flags = [
+'-Wall',
+'-Werror',
+'-pedantic-errors',
+'-std=c++0x',
+'-fno-strict-aliasing',
+'-O3',
+'-DNDEBUG',
+# ...and the same thing goes for the magic -x option which specifies the
+# language that the files to be compiled are written in. This is mostly
+# relevant for c++ headers.
+# For a C project, you would set this to 'c' instead of 'c++'.
+'-x', 'c++',
+'-I', 'include',
+'-isystem', '/usr/include',
+'-isystem', '/usr/local/include',
+]
+
+
+# Set this to the absolute path to the folder (NOT the file!) containing the
+# compile_commands.json file to use that instead of 'flags'. See here for
+# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
+#
+# Most projects will NOT need to set this to anything; you can just change the
+# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
+compilation_database_folder = ''
+
+if os.path.exists( compilation_database_folder ):
+ database = ycm_core.CompilationDatabase( compilation_database_folder )
+else:
+ database = None
+
+SOURCE_EXTENSIONS = [ '.cc' ]
+
+def DirectoryOfThisScript():
+ return os.path.dirname( os.path.abspath( __file__ ) )
+
+
+def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
+ if not working_directory:
+ return list( flags )
+ new_flags = []
+ make_next_absolute = False
+ path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
+ for flag in flags:
+ new_flag = flag
+
+ if make_next_absolute:
+ make_next_absolute = False
+ if not flag.startswith( '/' ):
+ new_flag = os.path.join( working_directory, flag )
+
+ for path_flag in path_flags:
+ if flag == path_flag:
+ make_next_absolute = True
+ break
+
+ if flag.startswith( path_flag ):
+ path = flag[ len( path_flag ): ]
+ new_flag = path_flag + os.path.join( working_directory, path )
+ break
+
+ if new_flag:
+ new_flags.append( new_flag )
+ return new_flags
+
+
+def IsHeaderFile( filename ):
+ extension = os.path.splitext( filename )[ 1 ]
+ return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
+
+
+def GetCompilationInfoForFile( filename ):
+  # The compile_commands.json file generated by CMake does not have entries
+ # for header files. So we do our best by asking the db for flags for a
+ # corresponding source file, if any. If one exists, the flags for that file
+ # should be good enough.
+ if IsHeaderFile( filename ):
+ basename = os.path.splitext( filename )[ 0 ]
+ for extension in SOURCE_EXTENSIONS:
+ replacement_file = basename + extension
+ if os.path.exists( replacement_file ):
+ compilation_info = database.GetCompilationInfoForFile(
+ replacement_file )
+ if compilation_info.compiler_flags_:
+ return compilation_info
+ return None
+ return database.GetCompilationInfoForFile( filename )
+
+
+def FlagsForFile( filename, **kwargs ):
+ if database:
+ # Bear in mind that compilation_info.compiler_flags_ does NOT return a
+ # python list, but a "list-like" StringVec object
+ compilation_info = GetCompilationInfoForFile( filename )
+ if not compilation_info:
+ return None
+
+ final_flags = MakeRelativePathsInFlagsAbsolute(
+ compilation_info.compiler_flags_,
+ compilation_info.compiler_working_dir_ )
+ else:
+ relative_to = DirectoryOfThisScript()
+ final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
+
+ return {
+ 'flags': final_flags,
+ 'do_cache': True
+ }
diff --git a/lib/libcxx/utils/google-benchmark/AUTHORS b/lib/libcxx/utils/google-benchmark/AUTHORS
index daea1f66f07..09e2e0551ad 100644
--- a/lib/libcxx/utils/google-benchmark/AUTHORS
+++ b/lib/libcxx/utils/google-benchmark/AUTHORS
@@ -36,6 +36,7 @@ Maxim Vafin <maxvafin@gmail.com>
MongoDB Inc.
Nick Hutchinson <nshutchinson@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
+Ori Livneh <ori.livneh@gmail.com>
Paul Redmond <paul.redmond@gmail.com>
Radoslav Yovchev <radoslav.tm@gmail.com>
Roman Lebedev <lebedev.ri@gmail.com>
diff --git a/lib/libcxx/utils/google-benchmark/CMakeLists.txt b/lib/libcxx/utils/google-benchmark/CMakeLists.txt
index 8ddacabb6e0..310c7ee9f6b 100644
--- a/lib/libcxx/utils/google-benchmark/CMakeLists.txt
+++ b/lib/libcxx/utils/google-benchmark/CMakeLists.txt
@@ -16,7 +16,11 @@ option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON)
option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON)
option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF)
option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF)
-option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." OFF)
+if(NOT MSVC)
+ option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." OFF)
+else()
+ set(BENCHMARK_BUILD_32_BITS OFF CACHE BOOL "Build a 32 bit version of the library - unsupported when using MSVC)" FORCE)
+endif()
option(BENCHMARK_ENABLE_INSTALL "Enable installation of benchmark. (Projects embedding benchmark may want to turn this OFF.)" ON)
# Allow unmet dependencies to be met using CMake's ExternalProject mechanics, which
@@ -75,7 +79,7 @@ get_git_version(GIT_VERSION)
# Tell the user what versions we are using
string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]+" VERSION ${GIT_VERSION})
-message("-- Version: ${VERSION}")
+message(STATUS "Version: ${VERSION}")
# The version of the libraries
set(GENERIC_LIB_VERSION ${VERSION})
@@ -90,7 +94,7 @@ if (BENCHMARK_BUILD_32_BITS)
add_required_cxx_compiler_flag(-m32)
endif()
-if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
+if (MSVC)
# Turn compiler warnings up to 11
string(REGEX REPLACE "[-/]W[1-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4")
@@ -99,6 +103,7 @@ if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
if (NOT BENCHMARK_ENABLE_EXCEPTIONS)
add_cxx_compiler_flag(-EHs-)
add_cxx_compiler_flag(-EHa-)
+ add_definitions(-D_HAS_EXCEPTIONS=0)
endif()
# Link time optimisation
if (BENCHMARK_ENABLE_LTO)
@@ -163,7 +168,7 @@ else()
endif()
# ICC17u2: overloaded virtual function "benchmark::Fixture::SetUp" is only partially overridden
# (because of deprecated overload)
- add_cxx_compiler_flag(-wd654)
+ add_cxx_compiler_flag(-wd654)
add_cxx_compiler_flag(-Wthread-safety)
if (HAVE_CXX_FLAG_WTHREAD_SAFETY)
cxx_feature_check(THREAD_SAFETY_ATTRIBUTES)
@@ -189,7 +194,7 @@ else()
if (GCC_RANLIB)
set(CMAKE_RANLIB ${GCC_RANLIB})
endif()
- elseif("${CMAKE_C_COMPILER_ID}" STREQUAL "Clang")
+ elseif("${CMAKE_C_COMPILER_ID}" MATCHES "Clang")
include(llvm-toolchain)
endif()
endif()
@@ -214,12 +219,12 @@ else()
endif()
if (BENCHMARK_USE_LIBCXX)
- if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
+ if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
add_cxx_compiler_flag(-stdlib=libc++)
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" OR
"${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
add_cxx_compiler_flag(-nostdinc++)
- message("libc++ header path must be manually specified using CMAKE_CXX_FLAGS")
+ message(WARNING "libc++ header path must be manually specified using CMAKE_CXX_FLAGS")
# Adding -nodefaultlibs directly to CMAKE_<TYPE>_LINKER_FLAGS will break
# configuration checks such as 'find_package(Threads)'
list(APPEND BENCHMARK_CXX_LINKER_FLAGS -nodefaultlibs)
diff --git a/lib/libcxx/utils/google-benchmark/CONTRIBUTORS b/lib/libcxx/utils/google-benchmark/CONTRIBUTORS
index 2ff2f2a8fa0..ee74ff886c0 100644
--- a/lib/libcxx/utils/google-benchmark/CONTRIBUTORS
+++ b/lib/libcxx/utils/google-benchmark/CONTRIBUTORS
@@ -27,6 +27,7 @@ Arne Beer <arne@twobeer.de>
Billy Robert O'Neal III <billy.oneal@gmail.com> <bion@microsoft.com>
Chris Kennelly <ckennelly@google.com> <ckennelly@ckennelly.com>
Christopher Seymour <chris.j.seymour@hotmail.com>
+Cyrille Faucheux <cyrille.faucheux@gmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
Deniz Evrenci <denizevrenci@gmail.com>
Dominic Hamon <dma@stripysock.com> <dominic@google.com>
@@ -50,6 +51,7 @@ Matt Clarkson <mattyclarkson@gmail.com>
Maxim Vafin <maxvafin@gmail.com>
Nick Hutchinson <nshutchinson@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
+Ori Livneh <ori.livneh@gmail.com>
Pascal Leroy <phl@google.com>
Paul Redmond <paul.redmond@gmail.com>
Pierre Phaneuf <pphaneuf@google.com>
diff --git a/lib/libcxx/utils/google-benchmark/README.md b/lib/libcxx/utils/google-benchmark/README.md
index 80e69f6e10d..858ea2334ef 100644
--- a/lib/libcxx/utils/google-benchmark/README.md
+++ b/lib/libcxx/utils/google-benchmark/README.md
@@ -6,11 +6,9 @@
A library to support the benchmarking of functions, similar to unit-tests.
-Discussion group: https://groups.google.com/d/forum/benchmark-discuss
+[Discussion group](https://groups.google.com/d/forum/benchmark-discuss)
-IRC channel: https://freenode.net #googlebenchmark
-
-[Known issues and common problems](#known-issues)
+IRC channel: [freenode](https://freenode.net) #googlebenchmark
[Additional Tooling Documentation](docs/tools.md)
@@ -47,11 +45,10 @@ to `CMAKE_ARGS`.
For Ubuntu and Debian Based System
-First make sure you have git and cmake installed (If not please install it)
+First make sure you have git and cmake installed (If not please install them)
```
-sudo apt-get install git
-sudo apt-get install cmake
+sudo apt-get install git cmake
```
Now, let's clone the repository and build it
@@ -59,22 +56,20 @@ Now, let's clone the repository and build it
```
git clone https://github.com/google/benchmark.git
cd benchmark
-git clone https://github.com/google/googletest.git
+# If you want to build tests and don't use BENCHMARK_DOWNLOAD_DEPENDENCIES, then
+# git clone https://github.com/google/googletest.git
mkdir build
cd build
cmake .. -DCMAKE_BUILD_TYPE=RELEASE
make
```
-We need to install the library globally now
+If you need to install the library globally
```
sudo make install
```
-Now you have google/benchmark installed in your machine
-Note: Don't forget to link to pthread library while building
-
## Stable and Experimental Library Versions
The main branch contains the latest stable version of the benchmarking library;
@@ -87,15 +82,16 @@ to use, test, and provide feedback on the new features are encouraged to try
this branch. However, this branch provides no stability guarantees and reserves
the right to change and break the API at any time.
-## Prerequisite knowledge
-
-Before attempting to understand this framework one should ideally have some familiarity with the structure and format of the Google Test framework, upon which it is based. Documentation for Google Test, including a "Getting Started" (primer) guide, is available here:
-https://github.com/google/googletest/blob/master/googletest/docs/primer.md
+## Further knowledge
+It may help to read the [Google Test documentation](https://github.com/google/googletest/blob/master/googletest/docs/primer.md)
+as some of the structural aspects of the APIs are similar.
## Example usage
### Basic usage
-Define a function that executes the code to be measured.
+Define a function that executes the code to be measured, register it as a
+benchmark function using the `BENCHMARK` macro, and ensure an appropriate `main`
+function is available:
```c++
#include <benchmark/benchmark.h>
@@ -123,7 +119,23 @@ Don't forget to inform your linker to add benchmark library e.g. through
`BENCHMARK_MAIN();` at the end of the source file and link against
`-lbenchmark_main` to get the same default behavior.
-The benchmark library will reporting the timing for the code within the `for(...)` loop.
+The benchmark library will measure and report the timing for code within the
+`for(...)` loop.
+
+#### Platform-specific libraries
+When the library is built using GCC it is necessary to link with the pthread
+library due to how GCC implements `std::thread`. Failing to link to pthread will
+lead to runtime exceptions (unless you're using libc++), not linker errors. See
+[issue #67](https://github.com/google/benchmark/issues/67) for more details. You
+can link to pthread by adding `-pthread` to your linker command. Note, you can
+also use `-lpthread`, but there are potential issues with ordering of command
+line parameters if you use that.
+
+If you're running benchmarks on Windows, the shlwapi library (`-lshlwapi`) is
+also required.
+
+If you're running benchmarks on solaris, you'll want the kstat library linked in
+too (`-lkstat`).
### Passing arguments
Sometimes a family of benchmarks can be implemented with just one routine that
@@ -243,7 +255,7 @@ that might be used to customize high-order term calculation.
```c++
BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
- ->Range(1<<10, 1<<18)->Complexity([](int n)->double{return n; });
+ ->Range(1<<10, 1<<18)->Complexity([](int64_t n)->double{return n; });
```
### Templated benchmarks
@@ -252,7 +264,7 @@ messages of size `sizeof(v)` `range_x` times. It also outputs throughput in the
absence of multiprogramming.
```c++
-template <class Q> int BM_Sequential(benchmark::State& state) {
+template <class Q> void BM_Sequential(benchmark::State& state) {
Q q;
typename Q::value_type v;
for (auto _ : state) {
@@ -416,6 +428,26 @@ BENCHMARK(BM_test)->Range(8, 8<<10)->UseRealTime();
Without `UseRealTime`, CPU time is used by default.
+## Controlling timers
+Normally, the entire duration of the work loop (`for (auto _ : state) {}`)
+is measured. But sometimes it is necessary to do some work inside of that
+loop, on every iteration, without counting that time towards the benchmark
+time. That is possible, although it is not recommended, since it has high
+overhead.
+
+```c++
+static void BM_SetInsert_With_Timer_Control(benchmark::State& state) {
+ std::set<int> data;
+ for (auto _ : state) {
+ state.PauseTiming(); // Stop timers. They will not count until they are resumed.
+ data = ConstructRandomSet(state.range(0)); // Do something that should not be measured
+ state.ResumeTiming(); // And resume timers. They are now counting again.
+ // The rest will be measured.
+ for (int j = 0; j < state.range(1); ++j)
+ data.insert(RandomNumber());
+ }
+}
+BENCHMARK(BM_SetInsert_With_Timer_Control)->Ranges({{1<<10, 8<<10}, {128, 512}});
+```
## Manual timing
For benchmarking something for which neither CPU time nor real-time are
@@ -522,15 +554,7 @@ order to manually set the time unit, you can specify it manually:
BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
```
-## Controlling number of iterations
-In all cases, the number of iterations for which the benchmark is run is
-governed by the amount of time the benchmark takes. Concretely, the number of
-iterations is at least one, not more than 1e9, until CPU time is greater than
-the minimum time, or the wallclock time is 5x minimum time. The minimum time is
-set as a flag `--benchmark_min_time` or per benchmark by calling `MinTime` on
-the registered benchmark object.
-
-## Reporting the mean, median and standard deviation by repeated benchmarks
+### Reporting the mean, median and standard deviation by repeated benchmarks
By default each benchmark is run once and that single result is reported.
However benchmarks are often noisy and a single result may not be representative
of the overall behavior. For this reason it's possible to repeatedly rerun the
@@ -541,12 +565,20 @@ The number of runs of each benchmark is specified globally by the
`Repetitions` on the registered benchmark object. When a benchmark is run more
than once the mean, median and standard deviation of the runs will be reported.
-Additionally the `--benchmark_report_aggregates_only={true|false}` flag or
-`ReportAggregatesOnly(bool)` function can be used to change how repeated tests
-are reported. By default the result of each repeated run is reported. When this
-option is `true` only the mean, median and standard deviation of the runs is reported.
-Calling `ReportAggregatesOnly(bool)` on a registered benchmark object overrides
-the value of the flag for that benchmark.
+Additionally the `--benchmark_report_aggregates_only={true|false}`,
+`--benchmark_display_aggregates_only={true|false}` flags or
+`ReportAggregatesOnly(bool)`, `DisplayAggregatesOnly(bool)` functions can be
+used to change how repeated tests are reported. By default the result of each
+repeated run is reported. When the `report aggregates only` option is `true`,
+only the aggregates (i.e. mean, median and standard deviation, plus any
+complexity measurements if they were requested) of the runs are reported, to
+both reporters: standard output (console) and the file.
+However, when only the `display aggregates only` option is `true`,
+only the aggregates are displayed in the standard output, while the file
+output still contains everything.
+Calling `ReportAggregatesOnly(bool)` / `DisplayAggregatesOnly(bool)` on a
+registered benchmark object overrides the value of the appropriate flag for that
+benchmark.
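+
+For instance, a minimal sketch (using a hypothetical `BM_MyBenchmark` function)
+that runs ten repetitions and only displays the aggregates on the console,
+while the file output still contains every repetition:
+
+```c++
+// BM_MyBenchmark is assumed to be an ordinary benchmark function defined
+// elsewhere; only the registration is of interest here.
+BENCHMARK(BM_MyBenchmark)
+    ->Repetitions(10)              // run the whole benchmark 10 times
+    ->DisplayAggregatesOnly(true); // console shows only the aggregates
+```
+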
## User-defined statistics for repeated benchmarks
While having mean, median and standard deviation is nice, this may not be
@@ -653,9 +685,12 @@ In multithreaded benchmarks, each counter is set on the calling thread only.
When the benchmark finishes, the counters from each thread will be summed;
the resulting sum is the value which will be shown for the benchmark.
-The `Counter` constructor accepts two parameters: the value as a `double`
-and a bit flag which allows you to show counters as rates and/or as
-per-thread averages:
+The `Counter` constructor accepts three parameters: the value as a `double`;
+a bit flag which allows you to show counters as rates, and/or as per-thread
+averages, and/or as averages per iteration, and/or as iteration invariants;
+and a flag specifying the 'unit' - i.e. whether 1k means 1000 (the default,
+`benchmark::Counter::OneK::kIs1000`) or 1024
+(`benchmark::Counter::OneK::kIs1024`).
```c++
// sets a simple counter
@@ -671,6 +706,9 @@ per-thread averages:
// There's also a combined flag:
state.counters["FooAvgRate"] = Counter(numFoos,benchmark::Counter::kAvgThreadsRate);
+
+ // This says that we process with the rate of state.range(0) bytes every iteration:
+ state.counters["BytesProcessed"] = Counter(state.range(0), benchmark::Counter::kIsIterationInvariantRate, benchmark::Counter::OneK::kIs1024);
```
When you're compiling in C++11 mode or later you can use `insert()` with
@@ -810,8 +848,29 @@ BM_memcpy/32 12 ns 12 ns 54687500
BM_memcpy/32k 1834 ns 1837 ns 357143
```
+## Runtime and reporting considerations
+When the benchmark binary is executed, each benchmark function is run serially.
+The number of iterations to run is determined dynamically by running the
+benchmark a few times, measuring the time taken, and ensuring that the
+ultimate result will be statistically stable. As such, faster benchmark
+functions will be run for more iterations than slower benchmark functions,
+which is why the number of iterations is reported alongside the timings.
+
+In all cases, the number of iterations for which the benchmark is run is
+governed by the amount of time the benchmark takes. Concretely, the number of
+iterations is at least one, not more than 1e9, until CPU time is greater than
+the minimum time, or the wallclock time is 5x minimum time. The minimum time is
+set per benchmark by calling `MinTime` on the registered benchmark object.
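+
+For illustration, a sketch (again with a hypothetical `BM_MyBenchmark`
+function) of raising the minimum time for a single benchmark:
+
+```c++
+// Keep iterating until at least 2 seconds of CPU time have been accumulated
+// for this benchmark, instead of the 0.5 second default.
+BENCHMARK(BM_MyBenchmark)->MinTime(2.0);
+```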
+
+Average timings are then reported over the iterations run. If multiple
+repetitions are requested using the `--benchmark_repetitions` command-line
+option, or at registration time, the benchmark function will be run several
+times and statistical results across these repetitions will also be reported.
+
+As well as the per-benchmark entries, a preamble in the report will include
+information about the machine on which the benchmarks are run.
-## Output Formats
+### Output Formats
The library supports multiple output formats. Use the
`--benchmark_format=<console|json|csv>` flag to set the format type. `console`
is the default format.
@@ -879,14 +938,19 @@ name,iterations,real_time,cpu_time,bytes_per_second,items_per_second,label
"BM_SetInsert/1024/10",106365,17238.4,8421.53,4.74973e+06,1.18743e+06,
```
-## Output Files
+### Output Files
The library supports writing the output of the benchmark to a file specified
by `--benchmark_out=<filename>`. The format of the output can be specified
using `--benchmark_out_format={json|console|csv}`. Specifying
`--benchmark_out` does not suppress the console output.
+## Result comparison
+
+It is possible to compare the benchmarking results. See the [Additional Tooling Documentation](docs/tools.md).
+
## Debug vs Release
-By default, benchmark builds as a debug library. You will see a warning in the output when this is the case. To build it as a release library instead, use:
+By default, benchmark builds as a debug library. You will see a warning in the
+output when this is the case. To build it as a release library instead, use:
```
cmake -DCMAKE_BUILD_TYPE=Release
@@ -898,16 +962,11 @@ To enable link-time optimisation, use
cmake -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_LTO=true
```
-If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake cache variables, if autodetection fails.
-If you are using clang, you may need to set `LLVMAR_EXECUTABLE`, `LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
-
-## Linking against the library
+If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake
+cache variables, if autodetection fails.
-When the library is built using GCC it is necessary to link with `-pthread`,
-due to how GCC implements `std::thread`.
-
-For GCC 4.x failing to link to pthreads will lead to runtime exceptions, not linker errors.
-See [issue #67](https://github.com/google/benchmark/issues/67) for more details.
+If you are using clang, you may need to set `LLVMAR_EXECUTABLE`,
+`LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
## Compiler Support
@@ -937,14 +996,3 @@ sudo cpupower frequency-set --governor performance
./mybench
sudo cpupower frequency-set --governor powersave
```
-
-# Known Issues
-
-### Windows with CMake
-
-* Users must manually link `shlwapi.lib`. Failure to do so may result
-in unresolved symbols.
-
-### Solaris
-
-* Users must explicitly link with kstat library (-lkstat compilation flag).
diff --git a/lib/libcxx/utils/google-benchmark/WORKSPACE b/lib/libcxx/utils/google-benchmark/WORKSPACE
new file mode 100644
index 00000000000..54734f1ea55
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/WORKSPACE
@@ -0,0 +1,7 @@
+workspace(name = "com_github_google_benchmark")
+
+http_archive(
+ name = "com_google_googletest",
+ urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"],
+ strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e",
+)
diff --git a/lib/libcxx/utils/google-benchmark/appveyor.yml b/lib/libcxx/utils/google-benchmark/appveyor.yml
new file mode 100644
index 00000000000..cf240190bea
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/appveyor.yml
@@ -0,0 +1,50 @@
+version: '{build}'
+
+image: Visual Studio 2017
+
+configuration:
+ - Debug
+ - Release
+
+environment:
+ matrix:
+ - compiler: msvc-15-seh
+ generator: "Visual Studio 15 2017"
+
+ - compiler: msvc-15-seh
+ generator: "Visual Studio 15 2017 Win64"
+
+ - compiler: msvc-14-seh
+ generator: "Visual Studio 14 2015"
+
+ - compiler: msvc-14-seh
+ generator: "Visual Studio 14 2015 Win64"
+
+ - compiler: gcc-5.3.0-posix
+ generator: "MinGW Makefiles"
+ cxx_path: 'C:\mingw-w64\i686-5.3.0-posix-dwarf-rt_v4-rev0\mingw32\bin'
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+
+matrix:
+ fast_finish: true
+
+install:
+ # git bash conflicts with MinGW makefiles
+ - if "%generator%"=="MinGW Makefiles" (set "PATH=%PATH:C:\Program Files\Git\usr\bin;=%")
+ - if not "%cxx_path%"=="" (set "PATH=%PATH%;%cxx_path%")
+
+build_script:
+ - md _build -Force
+ - cd _build
+ - echo %configuration%
+ - cmake -G "%generator%" "-DCMAKE_BUILD_TYPE=%configuration%" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON ..
+ - cmake --build . --config %configuration%
+
+test_script:
+ - ctest -c %configuration% --timeout 300 --output-on-failure
+
+artifacts:
+ - path: '_build/CMakeFiles/*.log'
+ name: logs
+ - path: '_build/Testing/**/*.xml'
+ name: test_results
diff --git a/lib/libcxx/utils/google-benchmark/cmake/CXXFeatureCheck.cmake b/lib/libcxx/utils/google-benchmark/cmake/CXXFeatureCheck.cmake
index c4c4d660f1e..99b56dd6239 100644
--- a/lib/libcxx/utils/google-benchmark/cmake/CXXFeatureCheck.cmake
+++ b/lib/libcxx/utils/google-benchmark/cmake/CXXFeatureCheck.cmake
@@ -28,7 +28,7 @@ function(cxx_feature_check FILE)
endif()
if (NOT DEFINED COMPILE_${FEATURE})
- message("-- Performing Test ${FEATURE}")
+ message(STATUS "Performing Test ${FEATURE}")
if(CMAKE_CROSSCOMPILING)
try_compile(COMPILE_${FEATURE}
${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
@@ -42,7 +42,7 @@ function(cxx_feature_check FILE)
set(RUN_${FEATURE} 1)
endif()
else()
- message("-- Performing Test ${FEATURE}")
+ message(STATUS "Performing Test ${FEATURE}")
try_run(RUN_${FEATURE} COMPILE_${FEATURE}
${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
@@ -51,14 +51,14 @@ function(cxx_feature_check FILE)
endif()
if(RUN_${FEATURE} EQUAL 0)
- message("-- Performing Test ${FEATURE} -- success")
+ message(STATUS "Performing Test ${FEATURE} -- success")
set(HAVE_${VAR} 1 PARENT_SCOPE)
add_definitions(-DHAVE_${VAR})
else()
if(NOT COMPILE_${FEATURE})
- message("-- Performing Test ${FEATURE} -- failed to compile")
+ message(STATUS "Performing Test ${FEATURE} -- failed to compile")
else()
- message("-- Performing Test ${FEATURE} -- compiled but failed to run")
+ message(STATUS "Performing Test ${FEATURE} -- compiled but failed to run")
endif()
endif()
endfunction()
diff --git a/lib/libcxx/utils/google-benchmark/cmake/GetGitVersion.cmake b/lib/libcxx/utils/google-benchmark/cmake/GetGitVersion.cmake
index 88cebe3a1ca..4f10f226d7a 100644
--- a/lib/libcxx/utils/google-benchmark/cmake/GetGitVersion.cmake
+++ b/lib/libcxx/utils/google-benchmark/cmake/GetGitVersion.cmake
@@ -49,6 +49,6 @@ function(get_git_version var)
set(GIT_VERSION "v0.0.0")
endif()
- message("-- git Version: ${GIT_VERSION}")
+ message(STATUS "git Version: ${GIT_VERSION}")
set(${var} ${GIT_VERSION} PARENT_SCOPE)
endfunction()
diff --git a/lib/libcxx/utils/google-benchmark/cmake/HandleGTest.cmake b/lib/libcxx/utils/google-benchmark/cmake/HandleGTest.cmake
index 7ce1a633d65..b9c14436dbf 100644
--- a/lib/libcxx/utils/google-benchmark/cmake/HandleGTest.cmake
+++ b/lib/libcxx/utils/google-benchmark/cmake/HandleGTest.cmake
@@ -5,7 +5,7 @@ macro(build_external_gtest)
include(ExternalProject)
set(GTEST_FLAGS "")
if (BENCHMARK_USE_LIBCXX)
- if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
+ if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
list(APPEND GTEST_FLAGS -stdlib=libc++)
else()
message(WARNING "Unsupported compiler (${CMAKE_CXX_COMPILER}) when using libc++")
@@ -76,11 +76,11 @@ macro(build_external_gtest)
endmacro(build_external_gtest)
if (BENCHMARK_ENABLE_GTEST_TESTS)
- if (IS_DIRECTORY ${CMAKE_SOURCE_DIR}/googletest)
- set(GTEST_ROOT "${CMAKE_SOURCE_DIR}/googletest")
+ if (IS_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/googletest)
+ set(GTEST_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/googletest")
set(INSTALL_GTEST OFF CACHE INTERNAL "")
set(INSTALL_GMOCK OFF CACHE INTERNAL "")
- add_subdirectory(${CMAKE_SOURCE_DIR}/googletest)
+ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/googletest)
set(GTEST_BOTH_LIBRARIES gtest gmock gmock_main)
foreach(HEADER test mock)
# CMake 2.8 and older don't respect INTERFACE_INCLUDE_DIRECTORIES, so we
diff --git a/lib/libcxx/utils/google-benchmark/docs/tools.md b/lib/libcxx/utils/google-benchmark/docs/tools.md
index 70500bd3223..4a3b2e9bd2c 100644
--- a/lib/libcxx/utils/google-benchmark/docs/tools.md
+++ b/lib/libcxx/utils/google-benchmark/docs/tools.md
@@ -1,84 +1,25 @@
# Benchmark Tools
-## compare_bench.py
-
-The `compare_bench.py` utility which can be used to compare the result of benchmarks.
-The program is invoked like:
-
-``` bash
-$ compare_bench.py <old-benchmark> <new-benchmark> [benchmark options]...
-```
-
-Where `<old-benchmark>` and `<new-benchmark>` either specify a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
-
-`[benchmark options]` will be passed to the benchmarks invocations. They can be anything that binary accepts, be it either normal `--benchmark_*` parameters, or some custom parameters your binary takes.
-
-The sample output using the JSON test files under `Inputs/` gives:
-
-``` bash
-$ ./compare_bench.py ./gbench/Inputs/test1_run1.json ./gbench/Inputs/test1_run2.json
-Comparing ./gbench/Inputs/test1_run1.json to ./gbench/Inputs/test1_run2.json
-Benchmark Time CPU Time Old Time New CPU Old CPU New
--------------------------------------------------------------------------------------------------------------
-BM_SameTimes +0.0000 +0.0000 10 10 10 10
-BM_2xFaster -0.5000 -0.5000 50 25 50 25
-BM_2xSlower +1.0000 +1.0000 50 100 50 100
-BM_1PercentFaster -0.0100 -0.0100 100 99 100 99
-BM_1PercentSlower +0.0100 +0.0100 100 101 100 101
-BM_10PercentFaster -0.1000 -0.1000 100 90 100 90
-BM_10PercentSlower +0.1000 +0.1000 100 110 100 110
-BM_100xSlower +99.0000 +99.0000 100 10000 100 10000
-BM_100xFaster -0.9900 -0.9900 10000 100 10000 100
-BM_10PercentCPUToTime +0.1000 -0.1000 100 110 100 90
-BM_ThirdFaster -0.3333 -0.3334 100 67 100 67
-BM_BadTimeUnit -0.9000 +0.2000 0 0 0 1
-```
-
-As you can note, the values in `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
+## compare.py
-When a benchmark executable is run, the raw output from the benchmark is printed in real time to stdout. The sample output using `benchmark/basic_test` for both arguments looks like:
+The `compare.py` script can be used to compare the results of benchmarks.
-```
-./compare_bench.py test/basic_test test/basic_test --benchmark_filter=BM_empty.*
-RUNNING: test/basic_test --benchmark_filter=BM_empty.* --benchmark_out=/tmp/tmpN7LF3a
-Run on (8 X 4000 MHz CPU s)
-2017-11-07 23:28:36
----------------------------------------------------------------------
-Benchmark Time CPU Iterations
----------------------------------------------------------------------
-BM_empty 4 ns 4 ns 170178757
-BM_empty/threads:8 1 ns 7 ns 103868920
-BM_empty_stop_start 0 ns 0 ns 1000000000
-BM_empty_stop_start/threads:8 0 ns 0 ns 1403031720
-RUNNING: /test/basic_test --benchmark_filter=BM_empty.* --benchmark_out=/tmp/tmplvrIp8
-Run on (8 X 4000 MHz CPU s)
-2017-11-07 23:28:38
----------------------------------------------------------------------
-Benchmark Time CPU Iterations
----------------------------------------------------------------------
-BM_empty 4 ns 4 ns 169534855
-BM_empty/threads:8 1 ns 7 ns 104188776
-BM_empty_stop_start 0 ns 0 ns 1000000000
-BM_empty_stop_start/threads:8 0 ns 0 ns 1404159424
-Comparing ../build/test/basic_test to ../build/test/basic_test
-Benchmark Time CPU Time Old Time New CPU Old CPU New
----------------------------------------------------------------------------------------------------------------------
-BM_empty -0.0048 -0.0049 4 4 4 4
-BM_empty/threads:8 -0.0123 -0.0054 1 1 7 7
-BM_empty_stop_start -0.0000 -0.0000 0 0 0 0
-BM_empty_stop_start/threads:8 -0.0029 +0.0001 0 0 0 0
+**NOTE**: the utility relies on the scipy package which can be installed using [these instructions](https://www.scipy.org/install.html).
-```
+### Displaying aggregates only
-As you can note, the values in `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
-Obviously this example doesn't give any useful output, but it's intended to show the output format when 'compare_bench.py' needs to run benchmarks.
+The switch `-a` / `--display_aggregates_only` can be used to control the
+display of the normal iterations vs the aggregates. When passed, it will be
+passed through to the benchmark binaries being run, and will be accounted for
+in the tool itself; only the aggregates will be displayed, not the normal runs.
+It only affects the display; the separate runs will still be used to calculate
+the U test.
-## compare.py
+### Modes of operation
-The `compare.py` can be used to compare the result of benchmarks.
There are three modes of operation:
-1. Just compare two benchmarks, what `compare_bench.py` did.
+1. Just compare two benchmarks
The program is invoked like:
``` bash
@@ -240,3 +181,19 @@ Benchmark Time CPU Time Old
```
This is a mix of the previous two modes, two (potentially different) benchmark binaries are run, and a different filter is applied to each one.
As you can note, the values in `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
+
+### U test
+
+If there is a sufficient repetition count of the benchmarks, the tool can do
+a [U Test](https://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test) of the
+null hypothesis that it is equally likely that a randomly selected value from
+one sample will be less than or greater than a randomly selected value from a
+second sample.
+
+If the calculated p-value is lower than the significance level alpha, then the
+result is said to be statistically significant and the null hypothesis is
+rejected. In other words, this means that the two benchmarks aren't identical.
+
+**WARNING**: requires a **LARGE** (no less than 9) number of repetitions to be
+meaningful!
diff --git a/lib/libcxx/utils/google-benchmark/include/benchmark/benchmark.h b/lib/libcxx/utils/google-benchmark/include/benchmark/benchmark.h
index 193fffc4bea..a0fd7c6e1ca 100644
--- a/lib/libcxx/utils/google-benchmark/include/benchmark/benchmark.h
+++ b/lib/libcxx/utils/google-benchmark/include/benchmark/benchmark.h
@@ -241,8 +241,21 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
#define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#endif
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+
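+// BENCHMARK_UNREACHABLE() tells the compiler that a code path can never be
+// taken; it is used after exhaustive switch statements later in this header.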
+#if defined(__GNUC__) || __has_builtin(__builtin_unreachable)
+ #define BENCHMARK_UNREACHABLE() __builtin_unreachable()
+#elif defined(_MSC_VER)
+ #define BENCHMARK_UNREACHABLE() __assume(false)
+#else
+ #define BENCHMARK_UNREACHABLE() ((void)0)
+#endif
+
namespace benchmark {
class BenchmarkReporter;
+class MemoryManager;
void Initialize(int* argc, char** argv);
@@ -255,7 +268,7 @@ bool ReportUnrecognizedArguments(int argc, char** argv);
// of each matching benchmark. Otherwise run each matching benchmark and
// report the results.
//
-// The second and third overload use the specified 'console_reporter' and
+// The second and third overload use the specified 'display_reporter' and
// 'file_reporter' respectively. 'file_reporter' will write to the file
// specified
// by '--benchmark_output'. If '--benchmark_output' is not given the
@@ -263,16 +276,13 @@ bool ReportUnrecognizedArguments(int argc, char** argv);
//
// RETURNS: The number of matching benchmarks.
size_t RunSpecifiedBenchmarks();
-size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter);
-size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter);
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
BenchmarkReporter* file_reporter);
-// If this routine is called, peak memory allocation past this point in the
-// benchmark is reported at the end of the benchmark report line. (It is
-// computed by running the benchmark once with a single iteration and a memory
-// tracer.)
-// TODO(dominic)
-// void MemoryUsage();
+// Register a MemoryManager instance that will be used to collect and report
+// allocation measurements for benchmark runs.
+void RegisterMemoryManager(MemoryManager* memory_manager);
namespace internal {
class Benchmark;
@@ -363,11 +373,20 @@ class Counter {
kAvgIterationsRate = kIsRate | kAvgIterations
};
+ enum OneK {
+ // 1'000 items per 1k
+ kIs1000 = 1000,
+ // 1'024 items per 1k
+ kIs1024 = 1024
+ };
+
double value;
Flags flags;
+ OneK oneK;
BENCHMARK_ALWAYS_INLINE
- Counter(double v = 0., Flags f = kDefaults) : value(v), flags(f) {}
+ Counter(double v = 0., Flags f = kDefaults, OneK k = kIs1000)
+ : value(v), flags(f), oneK(k) {}
BENCHMARK_ALWAYS_INLINE operator double const&() const { return value; }
BENCHMARK_ALWAYS_INLINE operator double&() { return value; }
@@ -406,22 +425,35 @@ struct Statistics {
std::string name_;
StatisticsFunc* compute_;
- Statistics(std::string name, StatisticsFunc* compute)
+ Statistics(const std::string& name, StatisticsFunc* compute)
: name_(name), compute_(compute) {}
};
namespace internal {
+struct BenchmarkInstance;
class ThreadTimer;
class ThreadManager;
-enum ReportMode
+enum AggregationReportMode
#if defined(BENCHMARK_HAS_CXX11)
: unsigned
#else
#endif
-{ RM_Unspecified, // The mode has not been manually specified
- RM_Default, // The mode is user-specified as default.
- RM_ReportAggregatesOnly };
+{
+ // The mode has not been manually specified
+ ARM_Unspecified = 0,
+ // The mode is user-specified.
+ // This may or may not be set when the following bit-flags are set.
+ ARM_Default = 1U << 0U,
+ // File reporter should only output aggregates.
+ ARM_FileReportAggregatesOnly = 1U << 1U,
+ // Display reporter should only output aggregates
+ ARM_DisplayReportAggregatesOnly = 1U << 2U,
+ // Both reporters should only display aggregates.
+ ARM_ReportAggregatesOnly =
+ ARM_FileReportAggregatesOnly | ARM_DisplayReportAggregatesOnly
+};
+
} // namespace internal
// State is passed to a running Benchmark and contains state for the
@@ -517,16 +549,21 @@ class State {
// Set the number of bytes processed by the current benchmark
// execution. This routine is typically called once at the end of a
- // throughput oriented benchmark. If this routine is called with a
- // value > 0, the report is printed in MB/sec instead of nanoseconds
- // per iteration.
+ // throughput oriented benchmark.
//
// REQUIRES: a benchmark has exited its benchmarking loop.
BENCHMARK_ALWAYS_INLINE
- void SetBytesProcessed(int64_t bytes) { bytes_processed_ = bytes; }
+ void SetBytesProcessed(int64_t bytes) {
+ counters["bytes_per_second"] =
+ Counter(static_cast<double>(bytes), Counter::kIsRate, Counter::kIs1024);
+ }
BENCHMARK_ALWAYS_INLINE
- int64_t bytes_processed() const { return bytes_processed_; }
+ int64_t bytes_processed() const {
+ if (counters.find("bytes_per_second") != counters.end())
+ return static_cast<int64_t>(counters.at("bytes_per_second"));
+ return 0;
+ }
// If this routine is called with complexity_n > 0 and complexity report is
// requested for the
@@ -546,10 +583,17 @@ class State {
//
// REQUIRES: a benchmark has exited its benchmarking loop.
BENCHMARK_ALWAYS_INLINE
- void SetItemsProcessed(int64_t items) { items_processed_ = items; }
+ void SetItemsProcessed(int64_t items) {
+ counters["items_per_second"] =
+ Counter(static_cast<double>(items), benchmark::Counter::kIsRate);
+ }
BENCHMARK_ALWAYS_INLINE
- int64_t items_processed() const { return items_processed_; }
+ int64_t items_processed() const {
+ if (counters.find("items_per_second") != counters.end())
+ return static_cast<int64_t>(counters.at("items_per_second"));
+ return 0;
+ }
// If this routine is called, the specified label is printed at the
// end of the benchmark report line for the currently executing
@@ -612,9 +656,6 @@ class State {
private: // items we don't need on the first cache line
std::vector<int64_t> range_;
- int64_t bytes_processed_;
- int64_t items_processed_;
-
int64_t complexity_n_;
public:
@@ -625,12 +666,11 @@ class State {
// Number of threads concurrently executing the benchmark.
const int threads;
- // TODO(EricWF) make me private
+ private:
State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
int n_threads, internal::ThreadTimer* timer,
internal::ThreadManager* manager);
- private:
void StartKeepRunning();
// Implementation of KeepRunning() and KeepRunningBatch().
// is_batch must be true unless n is 1.
@@ -638,7 +678,8 @@ class State {
void FinishKeepRunning();
internal::ThreadTimer* timer_;
internal::ThreadManager* manager_;
- BENCHMARK_DISALLOW_COPY_AND_ASSIGN(State);
+
+ friend struct internal::BenchmarkInstance;
};
inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunning() {
@@ -827,8 +868,12 @@ class Benchmark {
// Specify if each repetition of the benchmark should be reported separately
// or if only the final statistics should be reported. If the benchmark
// is not repeated then the single result is always reported.
+ // Applies to *ALL* reporters (display and file).
Benchmark* ReportAggregatesOnly(bool value = true);
+ // Same as ReportAggregatesOnly(), but applies to display reporter only.
+ Benchmark* DisplayAggregatesOnly(bool value = true);
+
// If a particular benchmark is I/O bound, runs multiple threads internally or
// if for some reason CPU timings are not representative, call this method. If
// called, the elapsed time will be used to control how many iterations are
@@ -888,9 +933,6 @@ class Benchmark {
virtual void Run(State& state) = 0;
- // Used inside the benchmark implementation
- struct Instance;
-
protected:
explicit Benchmark(const char* name);
Benchmark(Benchmark const&);
@@ -902,7 +944,7 @@ class Benchmark {
friend class BenchmarkFamilies;
std::string name_;
- ReportMode report_mode_;
+ AggregationReportMode aggregation_report_mode_;
std::vector<std::string> arg_names_; // Args for all benchmark runs
std::vector<std::vector<int64_t> > args_; // Args for all benchmark runs
TimeUnit time_unit_;
@@ -1242,6 +1284,7 @@ struct CPUInfo {
double cycles_per_second;
std::vector<CacheInfo> caches;
bool scaling_enabled;
+ std::vector<double> load_avg;
static const CPUInfo& Get();
@@ -1250,6 +1293,15 @@ struct CPUInfo {
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(CPUInfo);
};
+// System information gathered for reporting (see BenchmarkReporter::Context).
+struct SystemInfo {
+ std::string name;
+ static const SystemInfo& Get();
+ private:
+ SystemInfo();
+ BENCHMARK_DISALLOW_COPY_AND_ASSIGN(SystemInfo);
+};
+
// Interface for custom benchmark result printers.
// By default, benchmark reports are printed to stdout. However an application
// can control the destination of the reports by calling
@@ -1259,6 +1311,7 @@ class BenchmarkReporter {
public:
struct Context {
CPUInfo const& cpu_info;
+ SystemInfo const& sys_info;
// The number of chars in the longest benchmark name.
size_t name_field_width;
static const char* executable_name;
@@ -1266,23 +1319,30 @@ class BenchmarkReporter {
};
struct Run {
+ enum RunType { RT_Iteration, RT_Aggregate };
+
Run()
- : error_occurred(false),
+ : run_type(RT_Iteration),
+ error_occurred(false),
iterations(1),
time_unit(kNanosecond),
real_accumulated_time(0),
cpu_accumulated_time(0),
- bytes_per_second(0),
- items_per_second(0),
max_heapbytes_used(0),
complexity(oNone),
complexity_lambda(),
complexity_n(0),
report_big_o(false),
report_rms(false),
- counters() {}
-
- std::string benchmark_name;
+ counters(),
+ has_memory_result(false),
+ allocs_per_iter(0.0),
+ max_bytes_used(0) {}
+
+ std::string benchmark_name() const;
+ std::string run_name;
+ RunType run_type; // is this a measurement, or an aggregate?
+ std::string aggregate_name;
std::string report_label; // Empty if not set by benchmark.
bool error_occurred;
std::string error_message;
@@ -1304,10 +1364,6 @@ class BenchmarkReporter {
// accumulated time.
double GetAdjustedCPUTime() const;
- // Zero if not set by benchmark.
- double bytes_per_second;
- double items_per_second;
-
// This is set to 0.0 if memory tracing is not enabled.
double max_heapbytes_used;
@@ -1324,6 +1380,11 @@ class BenchmarkReporter {
bool report_rms;
UserCounters counters;
+
+ // Memory metrics.
+ bool has_memory_result;
+ double allocs_per_iter;
+ int64_t max_bytes_used;
};
// Construct a BenchmarkReporter with the output stream set to 'std::cout'
@@ -1438,6 +1499,29 @@ class BENCHMARK_DEPRECATED_MSG("The CSV Reporter will be removed in a future rel
std::set<std::string> user_counter_names_;
};
+// If a MemoryManager is registered, it can be used to collect and report
+// allocation metrics for a run of the benchmark.
+class MemoryManager {
+ public:
+ struct Result {
+ Result() : num_allocs(0), max_bytes_used(0) {}
+
+ // The number of allocations made in total between Start and Stop.
+ int64_t num_allocs;
+
+ // The peak memory use between Start and Stop.
+ int64_t max_bytes_used;
+ };
+
+ virtual ~MemoryManager() {}
+
+ // Implement this to start recording allocation information.
+ virtual void Start() = 0;
+
+ // Implement this to stop recording and fill out the given Result structure.
+ virtual void Stop(Result* result) = 0;
+};
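+
+// A minimal usage sketch (illustrative only; `MyMemoryManager` and its
+// bookkeeping are placeholders, not part of the library):
+//
+//   class MyMemoryManager : public benchmark::MemoryManager {
+//    public:
+//     void Start() { /* start tracking allocations here */ }
+//     void Stop(Result* result) {
+//       result->num_allocs = 0;      // fill in from your allocator hooks
+//       result->max_bytes_used = 0;  // likewise
+//     }
+//   };
+//
+//   // ... then, e.g. in main() before RunSpecifiedBenchmarks():
+//   static MyMemoryManager my_memory_manager;
+//   benchmark::RegisterMemoryManager(&my_memory_manager);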
+
inline const char* GetTimeUnitString(TimeUnit unit) {
switch (unit) {
case kMillisecond:
@@ -1445,9 +1529,9 @@ inline const char* GetTimeUnitString(TimeUnit unit) {
case kMicrosecond:
return "us";
case kNanosecond:
- default:
return "ns";
}
+ BENCHMARK_UNREACHABLE();
}
inline double GetTimeUnitMultiplier(TimeUnit unit) {
@@ -1457,9 +1541,9 @@ inline double GetTimeUnitMultiplier(TimeUnit unit) {
case kMicrosecond:
return 1e6;
case kNanosecond:
- default:
return 1e9;
}
+ BENCHMARK_UNREACHABLE();
}
} // namespace benchmark
diff --git a/lib/libcxx/utils/google-benchmark/src/benchmark.cc b/lib/libcxx/utils/google-benchmark/src/benchmark.cc
index b14bc629143..aab07500af4 100644
--- a/lib/libcxx/utils/google-benchmark/src/benchmark.cc
+++ b/lib/libcxx/utils/google-benchmark/src/benchmark.cc
@@ -14,6 +14,7 @@
#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
+#include "benchmark_runner.h"
#include "internal_macros.h"
#ifndef BENCHMARK_OS_WINDOWS
@@ -34,6 +35,7 @@
#include <memory>
#include <string>
#include <thread>
+#include <utility>
#include "check.h"
#include "colorprint.h"
@@ -55,9 +57,9 @@ DEFINE_bool(benchmark_list_tests, false,
DEFINE_string(benchmark_filter, ".",
"A regular expression that specifies the set of benchmarks "
- "to execute. If this flag is empty, no benchmarks are run. "
- "If this flag is the string \"all\", all benchmarks linked "
- "into the process are run.");
+ "to execute. If this flag is empty, or if this flag is the "
+ "string \"all\", all benchmarks linked into the binary are "
+ "run.");
DEFINE_double(benchmark_min_time, 0.5,
"Minimum number of seconds we should run benchmark before "
@@ -72,10 +74,19 @@ DEFINE_int32(benchmark_repetitions, 1,
"The number of runs of each benchmark. If greater than 1, the "
"mean and standard deviation of the runs will be reported.");
-DEFINE_bool(benchmark_report_aggregates_only, false,
- "Report the result of each benchmark repetitions. When 'true' is "
- "specified only the mean, standard deviation, and other statistics "
- "are reported for repeated benchmarks.");
+DEFINE_bool(
+ benchmark_report_aggregates_only, false,
+ "Report the result of each benchmark repetitions. When 'true' is specified "
+ "only the mean, standard deviation, and other statistics are reported for "
+ "repeated benchmarks. Affects all reporters.");
+
+DEFINE_bool(
+ benchmark_display_aggregates_only, false,
+ "Display the result of each benchmark repetitions. When 'true' is "
+ "specified only the mean, standard deviation, and other statistics are "
+ "displayed for repeated benchmarks. Unlike "
+ "benchmark_report_aggregates_only, only affects the display reporter, but "
+ "*NOT* file reporter, which will still contain all the output.");
DEFINE_string(benchmark_format, "console",
"The format to use for console output. Valid values are "
@@ -103,193 +114,11 @@ DEFINE_int32(v, 0, "The level of verbose logging to output");
namespace benchmark {
-namespace {
-static const size_t kMaxIterations = 1000000000;
-} // end namespace
-
namespace internal {
+// FIXME: wouldn't LTO mess this up?
void UseCharPointer(char const volatile*) {}
-namespace {
-
-BenchmarkReporter::Run CreateRunReport(
- const benchmark::internal::Benchmark::Instance& b,
- const internal::ThreadManager::Result& results, double seconds) {
- // Create report about this benchmark run.
- BenchmarkReporter::Run report;
-
- report.benchmark_name = b.name;
- report.error_occurred = results.has_error_;
- report.error_message = results.error_message_;
- report.report_label = results.report_label_;
- // This is the total iterations across all threads.
- report.iterations = results.iterations;
- report.time_unit = b.time_unit;
-
- if (!report.error_occurred) {
- double bytes_per_second = 0;
- if (results.bytes_processed > 0 && seconds > 0.0) {
- bytes_per_second = (results.bytes_processed / seconds);
- }
- double items_per_second = 0;
- if (results.items_processed > 0 && seconds > 0.0) {
- items_per_second = (results.items_processed / seconds);
- }
-
- if (b.use_manual_time) {
- report.real_accumulated_time = results.manual_time_used;
- } else {
- report.real_accumulated_time = results.real_time_used;
- }
- report.cpu_accumulated_time = results.cpu_time_used;
- report.bytes_per_second = bytes_per_second;
- report.items_per_second = items_per_second;
- report.complexity_n = results.complexity_n;
- report.complexity = b.complexity;
- report.complexity_lambda = b.complexity_lambda;
- report.statistics = b.statistics;
- report.counters = results.counters;
- internal::Finish(&report.counters, results.iterations, seconds, b.threads);
- }
- return report;
-}
-
-// Execute one thread of benchmark b for the specified number of iterations.
-// Adds the stats collected for the thread into *total.
-void RunInThread(const benchmark::internal::Benchmark::Instance* b,
- size_t iters, int thread_id,
- internal::ThreadManager* manager) {
- internal::ThreadTimer timer;
- State st(iters, b->arg, thread_id, b->threads, &timer, manager);
- b->benchmark->Run(st);
- CHECK(st.iterations() >= st.max_iterations)
- << "Benchmark returned before State::KeepRunning() returned false!";
- {
- MutexLock l(manager->GetBenchmarkMutex());
- internal::ThreadManager::Result& results = manager->results;
- results.iterations += st.iterations();
- results.cpu_time_used += timer.cpu_time_used();
- results.real_time_used += timer.real_time_used();
- results.manual_time_used += timer.manual_time_used();
- results.bytes_processed += st.bytes_processed();
- results.items_processed += st.items_processed();
- results.complexity_n += st.complexity_length_n();
- internal::Increment(&results.counters, st.counters);
- }
- manager->NotifyThreadComplete();
-}
-
-std::vector<BenchmarkReporter::Run> RunBenchmark(
- const benchmark::internal::Benchmark::Instance& b,
- std::vector<BenchmarkReporter::Run>* complexity_reports) {
- std::vector<BenchmarkReporter::Run> reports; // return value
-
- const bool has_explicit_iteration_count = b.iterations != 0;
- size_t iters = has_explicit_iteration_count ? b.iterations : 1;
- std::unique_ptr<internal::ThreadManager> manager;
- std::vector<std::thread> pool(b.threads - 1);
- const int repeats =
- b.repetitions != 0 ? b.repetitions : FLAGS_benchmark_repetitions;
- const bool report_aggregates_only =
- repeats != 1 &&
- (b.report_mode == internal::RM_Unspecified
- ? FLAGS_benchmark_report_aggregates_only
- : b.report_mode == internal::RM_ReportAggregatesOnly);
- for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
- for (;;) {
- // Try benchmark
- VLOG(2) << "Running " << b.name << " for " << iters << "\n";
-
- manager.reset(new internal::ThreadManager(b.threads));
- for (std::size_t ti = 0; ti < pool.size(); ++ti) {
- pool[ti] = std::thread(&RunInThread, &b, iters,
- static_cast<int>(ti + 1), manager.get());
- }
- RunInThread(&b, iters, 0, manager.get());
- manager->WaitForAllThreads();
- for (std::thread& thread : pool) thread.join();
- internal::ThreadManager::Result results;
- {
- MutexLock l(manager->GetBenchmarkMutex());
- results = manager->results;
- }
- manager.reset();
- // Adjust real/manual time stats since they were reported per thread.
- results.real_time_used /= b.threads;
- results.manual_time_used /= b.threads;
-
- VLOG(2) << "Ran in " << results.cpu_time_used << "/"
- << results.real_time_used << "\n";
-
- // Base decisions off of real time if requested by this benchmark.
- double seconds = results.cpu_time_used;
- if (b.use_manual_time) {
- seconds = results.manual_time_used;
- } else if (b.use_real_time) {
- seconds = results.real_time_used;
- }
-
- const double min_time =
- !IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time;
-
- // clang-format off
- // turn off clang-format since it mangles prettiness here
- // Determine if this run should be reported; Either it has
- // run for a sufficient amount of time or because an error was reported.
- const bool should_report = repetition_num > 0
- || has_explicit_iteration_count // An exact iteration count was requested
- || results.has_error_
- || iters >= kMaxIterations // No chance to try again, we hit the limit.
- || seconds >= min_time // the elapsed time is large enough
- // CPU time is specified but the elapsed real time greatly exceeds the
- // minimum time. Note that user provided timers are except from this
- // sanity check.
- || ((results.real_time_used >= 5 * min_time) && !b.use_manual_time);
- // clang-format on
-
- if (should_report) {
- BenchmarkReporter::Run report = CreateRunReport(b, results, seconds);
- if (!report.error_occurred && b.complexity != oNone)
- complexity_reports->push_back(report);
- reports.push_back(report);
- break;
- }
-
- // See how much iterations should be increased by
- // Note: Avoid division by zero with max(seconds, 1ns).
- double multiplier = min_time * 1.4 / std::max(seconds, 1e-9);
- // If our last run was at least 10% of FLAGS_benchmark_min_time then we
- // use the multiplier directly. Otherwise we use at most 10 times
- // expansion.
- // NOTE: When the last run was at least 10% of the min time the max
- // expansion should be 14x.
- bool is_significant = (seconds / min_time) > 0.1;
- multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
- if (multiplier <= 1.0) multiplier = 2.0;
- double next_iters = std::max(multiplier * iters, iters + 1.0);
- if (next_iters > kMaxIterations) {
- next_iters = kMaxIterations;
- }
- VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
- iters = static_cast<int>(next_iters + 0.5);
- }
- }
- // Calculate additional statistics
- auto stat_reports = ComputeStats(reports);
- if ((b.complexity != oNone) && b.last_benchmark_instance) {
- auto additional_run_stats = ComputeBigO(*complexity_reports);
- stat_reports.insert(stat_reports.end(), additional_run_stats.begin(),
- additional_run_stats.end());
- complexity_reports->clear();
- }
-
- if (report_aggregates_only) reports.clear();
- reports.insert(reports.end(), stat_reports.begin(), stat_reports.end());
- return reports;
-}
-
-} // namespace
} // namespace internal
State::State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
@@ -302,8 +131,6 @@ State::State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
finished_(false),
error_occurred_(false),
range_(ranges),
- bytes_processed_(0),
- items_processed_(0),
complexity_n_(0),
counters(),
thread_index(thread_i),
@@ -394,25 +221,25 @@ void State::FinishKeepRunning() {
namespace internal {
namespace {
-void RunBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
- BenchmarkReporter* console_reporter,
+void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
+ BenchmarkReporter* display_reporter,
BenchmarkReporter* file_reporter) {
// Note the file_reporter can be null.
- CHECK(console_reporter != nullptr);
+ CHECK(display_reporter != nullptr);
// Determine the width of the name field using a minimum width of 10.
- bool has_repetitions = FLAGS_benchmark_repetitions > 1;
+ bool might_have_aggregates = FLAGS_benchmark_repetitions > 1;
size_t name_field_width = 10;
size_t stat_field_width = 0;
- for (const Benchmark::Instance& benchmark : benchmarks) {
+ for (const BenchmarkInstance& benchmark : benchmarks) {
name_field_width =
std::max<size_t>(name_field_width, benchmark.name.size());
- has_repetitions |= benchmark.repetitions > 1;
+ might_have_aggregates |= benchmark.repetitions > 1;
for (const auto& Stat : *benchmark.statistics)
stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
}
- if (has_repetitions) name_field_width += 1 + stat_field_width;
+ if (might_have_aggregates) name_field_width += 1 + stat_field_width;
// Print header here
BenchmarkReporter::Context context;
@@ -429,22 +256,36 @@ void RunBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
std::flush(reporter->GetErrorStream());
};
- if (console_reporter->ReportContext(context) &&
+ if (display_reporter->ReportContext(context) &&
(!file_reporter || file_reporter->ReportContext(context))) {
- flushStreams(console_reporter);
+ flushStreams(display_reporter);
flushStreams(file_reporter);
+
for (const auto& benchmark : benchmarks) {
- std::vector<BenchmarkReporter::Run> reports =
- RunBenchmark(benchmark, &complexity_reports);
- console_reporter->ReportRuns(reports);
- if (file_reporter) file_reporter->ReportRuns(reports);
- flushStreams(console_reporter);
+ RunResults run_results = RunBenchmark(benchmark, &complexity_reports);
+
+ auto report = [&run_results](BenchmarkReporter* reporter,
+ bool report_aggregates_only) {
+ assert(reporter);
+ // If there are no aggregates, do output non-aggregates.
+ report_aggregates_only &= !run_results.aggregates_only.empty();
+ if (!report_aggregates_only)
+ reporter->ReportRuns(run_results.non_aggregates);
+ if (!run_results.aggregates_only.empty())
+ reporter->ReportRuns(run_results.aggregates_only);
+ };
+
+ report(display_reporter, run_results.display_report_aggregates_only);
+ if (file_reporter)
+ report(file_reporter, run_results.file_report_aggregates_only);
+
+ flushStreams(display_reporter);
flushStreams(file_reporter);
}
}
- console_reporter->Finalize();
+ display_reporter->Finalize();
if (file_reporter) file_reporter->Finalize();
- flushStreams(console_reporter);
+ flushStreams(display_reporter);
flushStreams(file_reporter);
}
@@ -471,15 +312,20 @@ bool IsZero(double n) {
ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
int output_opts = ConsoleReporter::OO_Defaults;
- if ((FLAGS_benchmark_color == "auto" && IsColorTerminal()) ||
- IsTruthyFlagValue(FLAGS_benchmark_color)) {
+ auto is_benchmark_color = [force_no_color] () -> bool {
+ if (force_no_color) {
+ return false;
+ }
+ if (FLAGS_benchmark_color == "auto") {
+ return IsColorTerminal();
+ }
+ return IsTruthyFlagValue(FLAGS_benchmark_color);
+ };
+ if (is_benchmark_color()) {
output_opts |= ConsoleReporter::OO_Color;
} else {
output_opts &= ~ConsoleReporter::OO_Color;
}
- if (force_no_color) {
- output_opts &= ~ConsoleReporter::OO_Color;
- }
if (FLAGS_benchmark_counters_tabular) {
output_opts |= ConsoleReporter::OO_Tabular;
} else {
@@ -494,11 +340,11 @@ size_t RunSpecifiedBenchmarks() {
return RunSpecifiedBenchmarks(nullptr, nullptr);
}
-size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter) {
- return RunSpecifiedBenchmarks(console_reporter, nullptr);
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter) {
+ return RunSpecifiedBenchmarks(display_reporter, nullptr);
}
-size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
+size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
BenchmarkReporter* file_reporter) {
std::string spec = FLAGS_benchmark_filter;
if (spec.empty() || spec == "all")
@@ -506,15 +352,15 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
// Setup the reporters
std::ofstream output_file;
- std::unique_ptr<BenchmarkReporter> default_console_reporter;
+ std::unique_ptr<BenchmarkReporter> default_display_reporter;
std::unique_ptr<BenchmarkReporter> default_file_reporter;
- if (!console_reporter) {
- default_console_reporter = internal::CreateReporter(
+ if (!display_reporter) {
+ default_display_reporter = internal::CreateReporter(
FLAGS_benchmark_format, internal::GetOutputOptions());
- console_reporter = default_console_reporter.get();
+ display_reporter = default_display_reporter.get();
}
- auto& Out = console_reporter->GetOutputStream();
- auto& Err = console_reporter->GetErrorStream();
+ auto& Out = display_reporter->GetOutputStream();
+ auto& Err = display_reporter->GetErrorStream();
std::string const& fname = FLAGS_benchmark_out;
if (fname.empty() && file_reporter) {
@@ -538,7 +384,7 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
file_reporter->SetErrorStream(&output_file);
}
- std::vector<internal::Benchmark::Instance> benchmarks;
+ std::vector<internal::BenchmarkInstance> benchmarks;
if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;
if (benchmarks.empty()) {
@@ -549,12 +395,16 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
if (FLAGS_benchmark_list_tests) {
for (auto const& benchmark : benchmarks) Out << benchmark.name << "\n";
} else {
- internal::RunBenchmarks(benchmarks, console_reporter, file_reporter);
+ internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
}
return benchmarks.size();
}
+void RegisterMemoryManager(MemoryManager* manager) {
+ internal::memory_manager = manager;
+}
+
namespace internal {
void PrintUsageAndExit() {
@@ -564,7 +414,8 @@ void PrintUsageAndExit() {
" [--benchmark_filter=<regex>]\n"
" [--benchmark_min_time=<min_time>]\n"
" [--benchmark_repetitions=<num_repetitions>]\n"
- " [--benchmark_report_aggregates_only={true|false}\n"
+ " [--benchmark_report_aggregates_only={true|false}]\n"
+ " [--benchmark_display_aggregates_only={true|false}]\n"
" [--benchmark_format=<console|json|csv>]\n"
" [--benchmark_out=<filename>]\n"
" [--benchmark_out_format=<json|console|csv>]\n"
@@ -588,6 +439,8 @@ void ParseCommandLineFlags(int* argc, char** argv) {
&FLAGS_benchmark_repetitions) ||
ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
&FLAGS_benchmark_report_aggregates_only) ||
+ ParseBoolFlag(argv[i], "benchmark_display_aggregates_only",
+ &FLAGS_benchmark_display_aggregates_only) ||
ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
ParseStringFlag(argv[i], "benchmark_out_format",
diff --git a/lib/libcxx/utils/google-benchmark/src/benchmark_api_internal.cc b/lib/libcxx/utils/google-benchmark/src/benchmark_api_internal.cc
new file mode 100644
index 00000000000..8d3108363b8
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/src/benchmark_api_internal.cc
@@ -0,0 +1,15 @@
+#include "benchmark_api_internal.h"
+
+namespace benchmark {
+namespace internal {
+
+State BenchmarkInstance::Run(
+ size_t iters, int thread_id, internal::ThreadTimer* timer,
+ internal::ThreadManager* manager) const {
+ State st(iters, arg, thread_id, threads, timer, manager);
+ benchmark->Run(st);
+ return st;
+}
+
+} // internal
+} // benchmark
diff --git a/lib/libcxx/utils/google-benchmark/src/benchmark_api_internal.h b/lib/libcxx/utils/google-benchmark/src/benchmark_api_internal.h
index dd7a3ffe8cb..0524a85c01d 100644
--- a/lib/libcxx/utils/google-benchmark/src/benchmark_api_internal.h
+++ b/lib/libcxx/utils/google-benchmark/src/benchmark_api_internal.h
@@ -2,10 +2,12 @@
#define BENCHMARK_API_INTERNAL_H
#include "benchmark/benchmark.h"
+#include "commandlineflags.h"
#include <cmath>
#include <iosfwd>
#include <limits>
+#include <memory>
#include <string>
#include <vector>
@@ -13,10 +15,10 @@ namespace benchmark {
namespace internal {
// Information kept per benchmark we may want to run
-struct Benchmark::Instance {
+struct BenchmarkInstance {
std::string name;
Benchmark* benchmark;
- ReportMode report_mode;
+ AggregationReportMode aggregation_report_mode;
std::vector<int64_t> arg;
TimeUnit time_unit;
int range_multiplier;
@@ -31,10 +33,13 @@ struct Benchmark::Instance {
double min_time;
size_t iterations;
int threads; // Number of concurrent threads to us
+
+ State Run(size_t iters, int thread_id, internal::ThreadTimer* timer,
+ internal::ThreadManager* manager) const;
};
bool FindBenchmarksInternal(const std::string& re,
- std::vector<Benchmark::Instance>* benchmarks,
+ std::vector<BenchmarkInstance>* benchmarks,
std::ostream* Err);
bool IsZero(double n);
diff --git a/lib/libcxx/utils/google-benchmark/src/benchmark_register.cc b/lib/libcxx/utils/google-benchmark/src/benchmark_register.cc
index 26a89721c78..f17f5b223ce 100644
--- a/lib/libcxx/utils/google-benchmark/src/benchmark_register.cc
+++ b/lib/libcxx/utils/google-benchmark/src/benchmark_register.cc
@@ -78,7 +78,7 @@ class BenchmarkFamilies {
// Extract the list of benchmark instances that match the specified
// regular expression.
bool FindBenchmarks(std::string re,
- std::vector<Benchmark::Instance>* benchmarks,
+ std::vector<BenchmarkInstance>* benchmarks,
std::ostream* Err);
private:
@@ -107,7 +107,7 @@ void BenchmarkFamilies::ClearBenchmarks() {
}
bool BenchmarkFamilies::FindBenchmarks(
- std::string spec, std::vector<Benchmark::Instance>* benchmarks,
+ std::string spec, std::vector<BenchmarkInstance>* benchmarks,
std::ostream* ErrStream) {
CHECK(ErrStream);
auto& Err = *ErrStream;
@@ -152,10 +152,10 @@ bool BenchmarkFamilies::FindBenchmarks(
for (auto const& args : family->args_) {
for (int num_threads : *thread_counts) {
- Benchmark::Instance instance;
+ BenchmarkInstance instance;
instance.name = family->name_;
instance.benchmark = family.get();
- instance.report_mode = family->report_mode_;
+ instance.aggregation_report_mode = family->aggregation_report_mode_;
instance.arg = args;
instance.time_unit = family->time_unit_;
instance.range_multiplier = family->range_multiplier_;
@@ -182,14 +182,19 @@ bool BenchmarkFamilies::FindBenchmarks(
}
}
- instance.name += StrFormat("%d", arg);
+ // we know that the args are always non-negative (see 'AddRange()'),
+ // thus print as 'unsigned'. BUT, do a cast due to the 32-bit builds.
+ instance.name += StrFormat("%lu", static_cast<unsigned long>(arg));
++arg_i;
}
if (!IsZero(family->min_time_))
instance.name += StrFormat("/min_time:%0.3f", family->min_time_);
- if (family->iterations_ != 0)
- instance.name += StrFormat("/iterations:%d", family->iterations_);
+ if (family->iterations_ != 0) {
+ instance.name +=
+ StrFormat("/iterations:%lu",
+ static_cast<unsigned long>(family->iterations_));
+ }
if (family->repetitions_ != 0)
instance.name += StrFormat("/repeats:%d", family->repetitions_);
@@ -225,7 +230,7 @@ Benchmark* RegisterBenchmarkInternal(Benchmark* bench) {
// FIXME: This function is a hack so that benchmark.cc can access
// `BenchmarkFamilies`
bool FindBenchmarksInternal(const std::string& re,
- std::vector<Benchmark::Instance>* benchmarks,
+ std::vector<BenchmarkInstance>* benchmarks,
std::ostream* Err) {
return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err);
}
@@ -236,7 +241,7 @@ bool FindBenchmarksInternal(const std::string& re,
Benchmark::Benchmark(const char* name)
: name_(name),
- report_mode_(RM_Unspecified),
+ aggregation_report_mode_(ARM_Unspecified),
time_unit_(kNanosecond),
range_multiplier_(kRangeMultiplier),
min_time_(0),
@@ -369,7 +374,23 @@ Benchmark* Benchmark::Repetitions(int n) {
}
Benchmark* Benchmark::ReportAggregatesOnly(bool value) {
- report_mode_ = value ? RM_ReportAggregatesOnly : RM_Default;
+ aggregation_report_mode_ = value ? ARM_ReportAggregatesOnly : ARM_Default;
+ return this;
+}
+
+Benchmark* Benchmark::DisplayAggregatesOnly(bool value) {
+ // If we were called, the report mode is no longer 'unspecified', in any case.
+ aggregation_report_mode_ = static_cast<AggregationReportMode>(
+ aggregation_report_mode_ | ARM_Default);
+
+ if (value) {
+ aggregation_report_mode_ = static_cast<AggregationReportMode>(
+ aggregation_report_mode_ | ARM_DisplayReportAggregatesOnly);
+ } else {
+ aggregation_report_mode_ = static_cast<AggregationReportMode>(
+ aggregation_report_mode_ & ~ARM_DisplayReportAggregatesOnly);
+ }
+
return this;
}
diff --git a/lib/libcxx/utils/google-benchmark/src/benchmark_runner.cc b/lib/libcxx/utils/google-benchmark/src/benchmark_runner.cc
new file mode 100644
index 00000000000..38faeec8e3e
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/src/benchmark_runner.cc
@@ -0,0 +1,350 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark_runner.h"
+#include "benchmark/benchmark.h"
+#include "benchmark_api_internal.h"
+#include "internal_macros.h"
+
+#ifndef BENCHMARK_OS_WINDOWS
+#ifndef BENCHMARK_OS_FUCHSIA
+#include <sys/resource.h>
+#endif
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#include <algorithm>
+#include <atomic>
+#include <condition_variable>
+#include <cstdio>
+#include <cstdlib>
+#include <fstream>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <thread>
+#include <utility>
+
+#include "check.h"
+#include "colorprint.h"
+#include "commandlineflags.h"
+#include "complexity.h"
+#include "counter.h"
+#include "internal_macros.h"
+#include "log.h"
+#include "mutex.h"
+#include "re.h"
+#include "statistics.h"
+#include "string_util.h"
+#include "thread_manager.h"
+#include "thread_timer.h"
+
+namespace benchmark {
+
+namespace internal {
+
+MemoryManager* memory_manager = nullptr;
+
+namespace {
+
+static const size_t kMaxIterations = 1000000000;
+
+BenchmarkReporter::Run CreateRunReport(
+ const benchmark::internal::BenchmarkInstance& b,
+ const internal::ThreadManager::Result& results, size_t memory_iterations,
+ const MemoryManager::Result& memory_result, double seconds) {
+  // Create a report about this benchmark run.
+ BenchmarkReporter::Run report;
+
+ report.run_name = b.name;
+ report.error_occurred = results.has_error_;
+ report.error_message = results.error_message_;
+ report.report_label = results.report_label_;
+ // This is the total iterations across all threads.
+ report.iterations = results.iterations;
+ report.time_unit = b.time_unit;
+
+ if (!report.error_occurred) {
+ if (b.use_manual_time) {
+ report.real_accumulated_time = results.manual_time_used;
+ } else {
+ report.real_accumulated_time = results.real_time_used;
+ }
+ report.cpu_accumulated_time = results.cpu_time_used;
+ report.complexity_n = results.complexity_n;
+ report.complexity = b.complexity;
+ report.complexity_lambda = b.complexity_lambda;
+ report.statistics = b.statistics;
+ report.counters = results.counters;
+
+ if (memory_iterations > 0) {
+ report.has_memory_result = true;
+ report.allocs_per_iter =
+ memory_iterations ? static_cast<double>(memory_result.num_allocs) /
+ memory_iterations
+ : 0;
+ report.max_bytes_used = memory_result.max_bytes_used;
+ }
+
+ internal::Finish(&report.counters, results.iterations, seconds, b.threads);
+ }
+ return report;
+}
+
+// Execute one thread of benchmark b for the specified number of iterations.
+// Adds the stats collected for the thread into *total.
+void RunInThread(const BenchmarkInstance* b, size_t iters, int thread_id,
+ ThreadManager* manager) {
+ internal::ThreadTimer timer;
+ State st = b->Run(iters, thread_id, &timer, manager);
+ CHECK(st.iterations() >= st.max_iterations)
+ << "Benchmark returned before State::KeepRunning() returned false!";
+ {
+ MutexLock l(manager->GetBenchmarkMutex());
+ internal::ThreadManager::Result& results = manager->results;
+ results.iterations += st.iterations();
+ results.cpu_time_used += timer.cpu_time_used();
+ results.real_time_used += timer.real_time_used();
+ results.manual_time_used += timer.manual_time_used();
+ results.complexity_n += st.complexity_length_n();
+ internal::Increment(&results.counters, st.counters);
+ }
+ manager->NotifyThreadComplete();
+}
+
+class BenchmarkRunner {
+ public:
+ BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_,
+ std::vector<BenchmarkReporter::Run>* complexity_reports_)
+ : b(b_),
+ complexity_reports(*complexity_reports_),
+ min_time(!IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time),
+ repeats(b.repetitions != 0 ? b.repetitions
+ : FLAGS_benchmark_repetitions),
+ has_explicit_iteration_count(b.iterations != 0),
+ pool(b.threads - 1),
+ iters(has_explicit_iteration_count ? b.iterations : 1) {
+ run_results.display_report_aggregates_only =
+ (FLAGS_benchmark_report_aggregates_only ||
+ FLAGS_benchmark_display_aggregates_only);
+ run_results.file_report_aggregates_only =
+ FLAGS_benchmark_report_aggregates_only;
+ if (b.aggregation_report_mode != internal::ARM_Unspecified) {
+ run_results.display_report_aggregates_only =
+ (b.aggregation_report_mode &
+ internal::ARM_DisplayReportAggregatesOnly);
+ run_results.file_report_aggregates_only =
+ (b.aggregation_report_mode & internal::ARM_FileReportAggregatesOnly);
+ }
+
+ for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
+ const bool is_the_first_repetition = repetition_num == 0;
+ DoOneRepetition(is_the_first_repetition);
+ }
+
+ // Calculate additional statistics
+ run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
+
+ // Maybe calculate complexity report
+ if ((b.complexity != oNone) && b.last_benchmark_instance) {
+ auto additional_run_stats = ComputeBigO(complexity_reports);
+ run_results.aggregates_only.insert(run_results.aggregates_only.end(),
+ additional_run_stats.begin(),
+ additional_run_stats.end());
+ complexity_reports.clear();
+ }
+ }
+
+ RunResults&& get_results() { return std::move(run_results); }
+
+ private:
+ RunResults run_results;
+
+ const benchmark::internal::BenchmarkInstance& b;
+ std::vector<BenchmarkReporter::Run>& complexity_reports;
+
+ const double min_time;
+ const int repeats;
+ const bool has_explicit_iteration_count;
+
+ std::vector<std::thread> pool;
+
+ size_t iters; // preserved between repetitions!
+ // So only the first repetition has to find/calculate it,
+ // the other repetitions will just use that precomputed iteration count.
+
+ struct IterationResults {
+ internal::ThreadManager::Result results;
+ size_t iters;
+ double seconds;
+ };
+ IterationResults DoNIterations() {
+ VLOG(2) << "Running " << b.name << " for " << iters << "\n";
+
+ std::unique_ptr<internal::ThreadManager> manager;
+ manager.reset(new internal::ThreadManager(b.threads));
+
+ // Run all but one thread in separate threads
+ for (std::size_t ti = 0; ti < pool.size(); ++ti) {
+ pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1),
+ manager.get());
+ }
+ // And run one thread here directly.
+ // (If we were asked to run just one thread, we don't create new threads.)
+ // Yes, we need to do this here *after* we start the separate threads.
+ RunInThread(&b, iters, 0, manager.get());
+
+ // The main thread has finished. Now let's wait for the other threads.
+ manager->WaitForAllThreads();
+ for (std::thread& thread : pool) thread.join();
+
+ IterationResults i;
+ // Acquire the measurements/counters from the manager, UNDER THE LOCK!
+ {
+ MutexLock l(manager->GetBenchmarkMutex());
+ i.results = manager->results;
+ }
+
+ // And get rid of the manager.
+ manager.reset();
+
+ // Adjust real/manual time stats since they were reported per thread.
+ i.results.real_time_used /= b.threads;
+ i.results.manual_time_used /= b.threads;
+
+ VLOG(2) << "Ran in " << i.results.cpu_time_used << "/"
+ << i.results.real_time_used << "\n";
+
+ // So for how long were we running?
+ i.iters = iters;
+ // Base decisions off of real time if requested by this benchmark.
+ i.seconds = i.results.cpu_time_used;
+ if (b.use_manual_time) {
+ i.seconds = i.results.manual_time_used;
+ } else if (b.use_real_time) {
+ i.seconds = i.results.real_time_used;
+ }
+
+ return i;
+ }
+
+ size_t PredictNumItersNeeded(const IterationResults& i) const {
+    // See by how much the iteration count should be increased.
+ // Note: Avoid division by zero with max(seconds, 1ns).
+ double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
+ // If our last run was at least 10% of FLAGS_benchmark_min_time then we
+ // use the multiplier directly.
+ // Otherwise we use at most 10 times expansion.
+ // NOTE: When the last run was at least 10% of the min time the max
+ // expansion should be 14x.
+ bool is_significant = (i.seconds / min_time) > 0.1;
+ multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
+ if (multiplier <= 1.0) multiplier = 2.0;
+
+ // So what seems to be the sufficiently-large iteration count? Round up.
+ const size_t max_next_iters =
+ 0.5 + std::max(multiplier * i.iters, i.iters + 1.0);
+    // But we do have *some* sanity limits...
+ const size_t next_iters = std::min(max_next_iters, kMaxIterations);
+
+ VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
+ return next_iters; // round up before conversion to integer.
+ }
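The growth rule in PredictNumItersNeeded() can be exercised in isolation. A small sketch under the same 1.4 safety factor and 10x cap; the name NextIters is hypothetical and not part of the library:

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Hypothetical standalone restatement of the growth rule above.
static size_t NextIters(double min_time, double seconds, size_t iters,
                        size_t max_iters = 1000000000) {
  double multiplier = min_time * 1.4 / std::max(seconds, 1e-9);
  const bool is_significant = (seconds / min_time) > 0.1;
  if (!is_significant) multiplier = std::min(10.0, multiplier);
  if (multiplier <= 1.0) multiplier = 2.0;
  const size_t next =
      static_cast<size_t>(0.5 + std::max(multiplier * iters, iters + 1.0));
  return std::min(next, max_iters);
}

int main() {
  // One iteration took 1us against a 0.5s budget: not significant, cap at 10x.
  std::printf("%zu\n", NextIters(0.5, 1e-6, 1));      // 10
  // 100000 iterations took 0.1s (20% of budget): expand by ~1.4/0.2 = 7x.
  std::printf("%zu\n", NextIters(0.5, 0.1, 100000));  // 700000
}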
+
+ bool ShouldReportIterationResults(const IterationResults& i) const {
+    // Determine if this run should be reported:
+    // either it has run for a sufficient amount of time,
+    // or an error was reported.
+ return i.results.has_error_ ||
+ i.iters >= kMaxIterations || // Too many iterations already.
+ i.seconds >= min_time || // The elapsed time is large enough.
+ // CPU time is specified but the elapsed real time greatly exceeds
+ // the minimum time.
+           // Note that user-provided timers are exempt from this sanity check.
+ ((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time);
+ }
+
+ void DoOneRepetition(bool is_the_first_repetition) {
+ IterationResults i;
+
+ // We *may* be gradually increasing the length (iteration count)
+ // of the benchmark until we decide the results are significant.
+ // And once we do, we report those last results and exit.
+    // Note that if there are repetitions, the iteration count is *only*
+    // calculated for the *first* repetition; the other repetitions simply
+    // reuse that precomputed iteration count.
+ for (;;) {
+ i = DoNIterations();
+
+ // Do we consider the results to be significant?
+ // If we are doing repetitions, and the first repetition was already done,
+      // it has calculated the correct iteration count, so we have run that very
+ // iteration count just now. No need to calculate anything. Just report.
+ // Else, the normal rules apply.
+ const bool results_are_significant = !is_the_first_repetition ||
+ has_explicit_iteration_count ||
+ ShouldReportIterationResults(i);
+
+ if (results_are_significant) break; // Good, let's report them!
+
+ // Nope, bad iteration. Let's re-estimate the hopefully-sufficient
+ // iteration count, and run the benchmark again...
+
+ iters = PredictNumItersNeeded(i);
+ assert(iters > i.iters &&
+ "if we did more iterations than we want to do the next time, "
+ "then we should have accepted the current iteration run.");
+ }
+
+    // One last thing: we also need to produce the 'memory measurements'...
+ MemoryManager::Result memory_result;
+ size_t memory_iterations = 0;
+ if (memory_manager != nullptr) {
+ // Only run a few iterations to reduce the impact of one-time
+ // allocations in benchmarks that are not properly managed.
+ memory_iterations = std::min<size_t>(16, iters);
+ memory_manager->Start();
+ std::unique_ptr<internal::ThreadManager> manager;
+ manager.reset(new internal::ThreadManager(1));
+ RunInThread(&b, memory_iterations, 0, manager.get());
+ manager->WaitForAllThreads();
+ manager.reset();
+
+ memory_manager->Stop(&memory_result);
+ }
+
+    // OK, now actually report.
+ BenchmarkReporter::Run report = CreateRunReport(
+ b, i.results, memory_iterations, memory_result, i.seconds);
+
+ if (!report.error_occurred && b.complexity != oNone)
+ complexity_reports.push_back(report);
+
+ run_results.non_aggregates.push_back(report);
+ }
+};
+
+} // end namespace
+
+RunResults RunBenchmark(
+ const benchmark::internal::BenchmarkInstance& b,
+ std::vector<BenchmarkReporter::Run>* complexity_reports) {
+ internal::BenchmarkRunner r(b, complexity_reports);
+ return r.get_results();
+}
+
+} // end namespace internal
+
+} // end namespace benchmark
diff --git a/lib/libcxx/utils/google-benchmark/src/benchmark_runner.h b/lib/libcxx/utils/google-benchmark/src/benchmark_runner.h
new file mode 100644
index 00000000000..96e8282a11a
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/src/benchmark_runner.h
@@ -0,0 +1,51 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef BENCHMARK_RUNNER_H_
+#define BENCHMARK_RUNNER_H_
+
+#include "benchmark_api_internal.h"
+#include "internal_macros.h"
+
+DECLARE_double(benchmark_min_time);
+
+DECLARE_int32(benchmark_repetitions);
+
+DECLARE_bool(benchmark_report_aggregates_only);
+
+DECLARE_bool(benchmark_display_aggregates_only);
+
+namespace benchmark {
+
+namespace internal {
+
+extern MemoryManager* memory_manager;
+
+struct RunResults {
+ std::vector<BenchmarkReporter::Run> non_aggregates;
+ std::vector<BenchmarkReporter::Run> aggregates_only;
+
+ bool display_report_aggregates_only = false;
+ bool file_report_aggregates_only = false;
+};
+
+RunResults RunBenchmark(
+ const benchmark::internal::BenchmarkInstance& b,
+ std::vector<BenchmarkReporter::Run>* complexity_reports);
+
+} // namespace internal
+
+} // end namespace benchmark
+
+#endif // BENCHMARK_RUNNER_H_
diff --git a/lib/libcxx/utils/google-benchmark/src/colorprint.cc b/lib/libcxx/utils/google-benchmark/src/colorprint.cc
index 2dec4a8b28b..fff6a98818b 100644
--- a/lib/libcxx/utils/google-benchmark/src/colorprint.cc
+++ b/lib/libcxx/utils/google-benchmark/src/colorprint.cc
@@ -25,7 +25,7 @@
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
-#include <Windows.h>
+#include <windows.h>
#include <io.h>
#else
#include <unistd.h>
diff --git a/lib/libcxx/utils/google-benchmark/src/complexity.cc b/lib/libcxx/utils/google-benchmark/src/complexity.cc
index aafd538df21..6ef17660c95 100644
--- a/lib/libcxx/utils/google-benchmark/src/complexity.cc
+++ b/lib/libcxx/utils/google-benchmark/src/complexity.cc
@@ -73,8 +73,8 @@ std::string GetBigOString(BigO complexity) {
// - time : Vector containing the times for the benchmark tests.
// - fitting_curve : lambda expression (e.g. [](int64_t n) {return n; };).
-// For a deeper explanation on the algorithm logic, look the README file at
-// http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit
+// For a deeper explanation on the algorithm logic, please refer to
+// https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics
LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
const std::vector<double>& time,
@@ -182,12 +182,15 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
}
- std::string benchmark_name =
- reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/'));
+
+ std::string run_name = reports[0].benchmark_name().substr(
+ 0, reports[0].benchmark_name().find('/'));
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run big_o;
- big_o.benchmark_name = benchmark_name + "_BigO";
+ big_o.run_name = run_name;
+ big_o.run_type = BenchmarkReporter::Run::RT_Aggregate;
+ big_o.aggregate_name = "BigO";
big_o.iterations = 0;
big_o.real_accumulated_time = result_real.coef;
big_o.cpu_accumulated_time = result_cpu.coef;
@@ -203,8 +206,10 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
// Only add label to mean/stddev if it is same for all runs
Run rms;
+ rms.run_name = run_name;
big_o.report_label = reports[0].report_label;
- rms.benchmark_name = benchmark_name + "_RMS";
+ rms.run_type = BenchmarkReporter::Run::RT_Aggregate;
+ rms.aggregate_name = "RMS";
rms.report_label = big_o.report_label;
rms.iterations = 0;
rms.real_accumulated_time = result_real.rms / multiplier;
diff --git a/lib/libcxx/utils/google-benchmark/src/console_reporter.cc b/lib/libcxx/utils/google-benchmark/src/console_reporter.cc
index 48920ca7829..ca364727cb4 100644
--- a/lib/libcxx/utils/google-benchmark/src/console_reporter.cc
+++ b/lib/libcxx/utils/google-benchmark/src/console_reporter.cc
@@ -53,7 +53,7 @@ bool ConsoleReporter::ReportContext(const Context& context) {
}
void ConsoleReporter::PrintHeader(const Run& run) {
- std::string str = FormatString("%-*s %13s %13s %10s", static_cast<int>(name_field_width_),
+ std::string str = FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_),
"Benchmark", "Time", "CPU", "Iterations");
if(!run.counters.empty()) {
if(output_options_ & OO_Tabular) {
@@ -98,6 +98,21 @@ static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt,
va_end(args);
}
+
+static std::string FormatTime(double time) {
+ // Align decimal places...
+ if (time < 1.0) {
+ return FormatString("%10.3f", time);
+ }
+ if (time < 10.0) {
+ return FormatString("%10.2f", time);
+ }
+ if (time < 100.0) {
+ return FormatString("%10.1f", time);
+ }
+ return FormatString("%10.0f", time);
+}
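The decimal-alignment rule above, restated as a standalone sketch using std::snprintf in place of the library's FormatString helper (FormatTimeSketch is a made-up name, for illustration only):

#include <cstdio>
#include <string>

// Pick more decimal places for smaller values so the numbers line up in a
// 10-character column, mirroring FormatTime above.
static std::string FormatTimeSketch(double t) {
  char buf[32];
  const char* fmt = t < 1.0 ? "%10.3f" : t < 10.0 ? "%10.2f"
                  : t < 100.0 ? "%10.1f" : "%10.0f";
  std::snprintf(buf, sizeof(buf), fmt, t);
  return buf;
}

int main() {
  // Prints, one per line: [     0.123] [      4.50] [      67.8] [     12345]
  for (double t : {0.123, 4.5, 67.8, 12345.0})
    std::printf("[%s]\n", FormatTimeSketch(t).c_str());
}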
+
void ConsoleReporter::PrintRunData(const Run& result) {
typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...);
auto& Out = GetOutputStream();
@@ -106,7 +121,7 @@ void ConsoleReporter::PrintRunData(const Run& result) {
auto name_color =
(result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
printer(Out, name_color, "%-*s ", name_field_width_,
- result.benchmark_name.c_str());
+ result.benchmark_name().c_str());
if (result.error_occurred) {
printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'",
@@ -114,33 +129,24 @@ void ConsoleReporter::PrintRunData(const Run& result) {
printer(Out, COLOR_DEFAULT, "\n");
return;
}
- // Format bytes per second
- std::string rate;
- if (result.bytes_per_second > 0) {
- rate = StrCat(" ", HumanReadableNumber(result.bytes_per_second), "B/s");
- }
-
- // Format items per second
- std::string items;
- if (result.items_per_second > 0) {
- items =
- StrCat(" ", HumanReadableNumber(result.items_per_second), " items/s");
- }
const double real_time = result.GetAdjustedRealTime();
const double cpu_time = result.GetAdjustedCPUTime();
+ const std::string real_time_str = FormatTime(real_time);
+ const std::string cpu_time_str = FormatTime(cpu_time);
+
if (result.report_big_o) {
std::string big_o = GetBigOString(result.complexity);
- printer(Out, COLOR_YELLOW, "%10.2f %s %10.2f %s ", real_time, big_o.c_str(),
+ printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time, big_o.c_str(),
cpu_time, big_o.c_str());
} else if (result.report_rms) {
- printer(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ", real_time * 100,
- cpu_time * 100);
+ printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%",
+ cpu_time * 100, "%");
} else {
const char* timeLabel = GetTimeUnitString(result.time_unit);
- printer(Out, COLOR_YELLOW, "%10.0f %s %10.0f %s ", real_time, timeLabel,
- cpu_time, timeLabel);
+ printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(), timeLabel,
+ cpu_time_str.c_str(), timeLabel);
}
if (!result.report_big_o && !result.report_rms) {
@@ -150,7 +156,7 @@ void ConsoleReporter::PrintRunData(const Run& result) {
for (auto& c : result.counters) {
const std::size_t cNameLen = std::max(std::string::size_type(10),
c.first.length());
- auto const& s = HumanReadableNumber(c.second.value, 1000);
+ auto const& s = HumanReadableNumber(c.second.value, c.second.oneK);
if (output_options_ & OO_Tabular) {
if (c.second.flags & Counter::kIsRate) {
printer(Out, COLOR_DEFAULT, " %*s/s", cNameLen - 2, s.c_str());
@@ -164,14 +170,6 @@ void ConsoleReporter::PrintRunData(const Run& result) {
}
}
- if (!rate.empty()) {
- printer(Out, COLOR_DEFAULT, " %*s", 13, rate.c_str());
- }
-
- if (!items.empty()) {
- printer(Out, COLOR_DEFAULT, " %*s", 18, items.c_str());
- }
-
if (!result.report_label.empty()) {
printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str());
}
diff --git a/lib/libcxx/utils/google-benchmark/src/csv_reporter.cc b/lib/libcxx/utils/google-benchmark/src/csv_reporter.cc
index 4a641909d80..d2f1d27eb62 100644
--- a/lib/libcxx/utils/google-benchmark/src/csv_reporter.cc
+++ b/lib/libcxx/utils/google-benchmark/src/csv_reporter.cc
@@ -49,6 +49,8 @@ void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
// save the names of all the user counters
for (const auto& run : reports) {
for (const auto& cnt : run.counters) {
+ if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
+ continue;
user_counter_names_.insert(cnt.first);
}
}
@@ -69,6 +71,8 @@ void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
// check that all the current counters are saved in the name set
for (const auto& run : reports) {
for (const auto& cnt : run.counters) {
+ if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
+ continue;
CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end())
<< "All counters must be present in each run. "
<< "Counter named \"" << cnt.first
@@ -88,7 +92,7 @@ void CSVReporter::PrintRunData(const Run& run) {
// Field with embedded double-quote characters must be doubled and the field
// delimited with double-quotes.
- std::string name = run.benchmark_name;
+ std::string name = run.benchmark_name();
ReplaceAll(&name, "\"", "\"\"");
Out << '"' << name << "\",";
if (run.error_occurred) {
@@ -117,12 +121,12 @@ void CSVReporter::PrintRunData(const Run& run) {
}
Out << ",";
- if (run.bytes_per_second > 0.0) {
- Out << run.bytes_per_second;
+ if (run.counters.find("bytes_per_second") != run.counters.end()) {
+ Out << run.counters.at("bytes_per_second");
}
Out << ",";
- if (run.items_per_second > 0.0) {
- Out << run.items_per_second;
+ if (run.counters.find("items_per_second") != run.counters.end()) {
+ Out << run.counters.at("items_per_second");
}
Out << ",";
if (!run.report_label.empty()) {
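For context on the CSV (and matching JSON) hunks above: throughput is no longer a dedicated field of Run but an ordinary counter keyed "bytes_per_second"/"items_per_second". A hedged usage sketch, assuming the usual State::SetBytesProcessed()/SetItemsProcessed() helpers still feed those counters in this version, as the reporter_output_test changes further down suggest:

#include <cstdint>
#include <cstring>
#include <vector>

#include "benchmark/benchmark.h"

// The reporters above now look throughput up in run.counters, so a benchmark
// only states how much work it did; the per-second figures become counters.
static void BM_MemcpyThroughput(benchmark::State& state) {
  std::vector<char> src(4096), dst(4096);
  for (auto _ : state) {
    std::memcpy(dst.data(), src.data(), src.size());
    benchmark::DoNotOptimize(dst.data());
  }
  state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * 4096);
  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_MemcpyThroughput);
BENCHMARK_MAIN();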
diff --git a/lib/libcxx/utils/google-benchmark/src/cycleclock.h b/lib/libcxx/utils/google-benchmark/src/cycleclock.h
index 00d57641676..f5e37b011b9 100644
--- a/lib/libcxx/utils/google-benchmark/src/cycleclock.h
+++ b/lib/libcxx/utils/google-benchmark/src/cycleclock.h
@@ -41,7 +41,7 @@ extern "C" uint64_t __rdtsc();
#pragma intrinsic(__rdtsc)
#endif
-#ifndef BENCHMARK_OS_WINDOWS
+#if !defined(BENCHMARK_OS_WINDOWS) || defined(BENCHMARK_OS_MINGW)
#include <sys/time.h>
#include <time.h>
#endif
diff --git a/lib/libcxx/utils/google-benchmark/src/internal_macros.h b/lib/libcxx/utils/google-benchmark/src/internal_macros.h
index b7e9203ff60..5dbf4fd2752 100644
--- a/lib/libcxx/utils/google-benchmark/src/internal_macros.h
+++ b/lib/libcxx/utils/google-benchmark/src/internal_macros.h
@@ -11,9 +11,6 @@
#ifndef __has_feature
#define __has_feature(x) 0
#endif
-#ifndef __has_builtin
-#define __has_builtin(x) 0
-#endif
#if defined(__clang__)
#if !defined(COMPILER_CLANG)
@@ -43,6 +40,9 @@
#define BENCHMARK_OS_CYGWIN 1
#elif defined(_WIN32)
#define BENCHMARK_OS_WINDOWS 1
+ #if defined(__MINGW32__)
+ #define BENCHMARK_OS_MINGW 1
+ #endif
#elif defined(__APPLE__)
#define BENCHMARK_OS_APPLE 1
#include "TargetConditionals.h"
@@ -87,14 +87,6 @@
#define BENCHMARK_MAYBE_UNUSED
#endif
-#if defined(COMPILER_GCC) || __has_builtin(__builtin_unreachable)
- #define BENCHMARK_UNREACHABLE() __builtin_unreachable()
-#elif defined(COMPILER_MSVC)
- #define BENCHMARK_UNREACHABLE() __assume(false)
-#else
- #define BENCHMARK_UNREACHABLE() ((void)0)
-#endif
-
// clang-format on
#endif // BENCHMARK_INTERNAL_MACROS_H_
diff --git a/lib/libcxx/utils/google-benchmark/src/json_reporter.cc b/lib/libcxx/utils/google-benchmark/src/json_reporter.cc
index 611605af6b5..7d01e8e4e31 100644
--- a/lib/libcxx/utils/google-benchmark/src/json_reporter.cc
+++ b/lib/libcxx/utils/google-benchmark/src/json_reporter.cc
@@ -77,8 +77,15 @@ bool JSONReporter::ReportContext(const Context& context) {
std::string walltime_value = LocalDateTimeString();
out << indent << FormatKV("date", walltime_value) << ",\n";
+ out << indent << FormatKV("host_name", context.sys_info.name) << ",\n";
+
if (Context::executable_name) {
- out << indent << FormatKV("executable", Context::executable_name) << ",\n";
+    // Windows uses backslash as its path separator, which must be escaped
+    // in JSON or it breaks conforming JSON decoders.
+ std::string executable_name = Context::executable_name;
+ ReplaceAll(&executable_name, "\\", "\\\\");
+ out << indent << FormatKV("executable", executable_name) << ",\n";
}
CPUInfo const& info = context.cpu_info;
@@ -111,6 +118,12 @@ bool JSONReporter::ReportContext(const Context& context) {
}
indent = std::string(4, ' ');
out << indent << "],\n";
+ out << indent << "\"load_avg\": [";
+ for (auto it = info.load_avg.begin(); it != info.load_avg.end();) {
+ out << *it++;
+ if (it != info.load_avg.end()) out << ",";
+ }
+ out << "],\n";
#if defined(NDEBUG)
const char build_type[] = "release";
@@ -154,7 +167,20 @@ void JSONReporter::Finalize() {
void JSONReporter::PrintRunData(Run const& run) {
std::string indent(6, ' ');
std::ostream& out = GetOutputStream();
- out << indent << FormatKV("name", run.benchmark_name) << ",\n";
+ out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
+ out << indent << FormatKV("run_name", run.run_name) << ",\n";
+ out << indent << FormatKV("run_type", [&run]() -> const char* {
+ switch (run.run_type) {
+ case BenchmarkReporter::Run::RT_Iteration:
+ return "iteration";
+ case BenchmarkReporter::Run::RT_Aggregate:
+ return "aggregate";
+ }
+ BENCHMARK_UNREACHABLE();
+ }()) << ",\n";
+ if (run.run_type == BenchmarkReporter::Run::RT_Aggregate) {
+ out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n";
+ }
if (run.error_occurred) {
out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
out << indent << FormatKV("error_message", run.error_message) << ",\n";
@@ -175,17 +201,16 @@ void JSONReporter::PrintRunData(Run const& run) {
} else if (run.report_rms) {
out << indent << FormatKV("rms", run.GetAdjustedCPUTime());
}
- if (run.bytes_per_second > 0.0) {
- out << ",\n"
- << indent << FormatKV("bytes_per_second", run.bytes_per_second);
- }
- if (run.items_per_second > 0.0) {
- out << ",\n"
- << indent << FormatKV("items_per_second", run.items_per_second);
- }
+
for (auto& c : run.counters) {
out << ",\n" << indent << FormatKV(c.first, c.second);
}
+
+ if (run.has_memory_result) {
+ out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter);
+ out << ",\n" << indent << FormatKV("max_bytes_used", run.max_bytes_used);
+ }
+
if (!run.report_label.empty()) {
out << ",\n" << indent << FormatKV("label", run.report_label);
}
diff --git a/lib/libcxx/utils/google-benchmark/src/reporter.cc b/lib/libcxx/utils/google-benchmark/src/reporter.cc
index 541661a25f0..59bc5f71023 100644
--- a/lib/libcxx/utils/google-benchmark/src/reporter.cc
+++ b/lib/libcxx/utils/google-benchmark/src/reporter.cc
@@ -22,6 +22,7 @@
#include <vector>
#include "check.h"
+#include "string_util.h"
namespace benchmark {
@@ -54,6 +55,14 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
Out << "\n";
}
}
+ if (!info.load_avg.empty()) {
+ Out << "Load Average: ";
+ for (auto It = info.load_avg.begin(); It != info.load_avg.end();) {
+ Out << StrFormat("%.2f", *It++);
+ if (It != info.load_avg.end()) Out << ", ";
+ }
+ Out << "\n";
+ }
if (info.scaling_enabled) {
Out << "***WARNING*** CPU scaling is enabled, the benchmark "
@@ -70,7 +79,16 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
// No initializer because it's already initialized to NULL.
const char *BenchmarkReporter::Context::executable_name;
-BenchmarkReporter::Context::Context() : cpu_info(CPUInfo::Get()) {}
+BenchmarkReporter::Context::Context()
+ : cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get()) {}
+
+std::string BenchmarkReporter::Run::benchmark_name() const {
+ std::string name = run_name;
+ if (run_type == RT_Aggregate) {
+ name += "_" + aggregate_name;
+ }
+ return name;
+}
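So a run named "BM_Foo/repeats:3" with run_type RT_Aggregate and aggregate_name "mean" is reported as "BM_Foo/repeats:3_mean", which is exactly the shape the new *_aggregates_only tests grep for. A tiny standalone restatement of the rule (a sketch, not the library type):

#include <cassert>
#include <string>

// Standalone restatement of Run::benchmark_name() above.
static std::string BenchmarkName(const std::string& run_name, bool is_aggregate,
                                 const std::string& aggregate_name) {
  return is_aggregate ? run_name + "_" + aggregate_name : run_name;
}

int main() {
  assert(BenchmarkName("BM_Foo/repeats:3", false, "") == "BM_Foo/repeats:3");
  assert(BenchmarkName("BM_Foo/repeats:3", true, "mean") ==
         "BM_Foo/repeats:3_mean");
}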
double BenchmarkReporter::Run::GetAdjustedRealTime() const {
double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit);
diff --git a/lib/libcxx/utils/google-benchmark/src/sleep.cc b/lib/libcxx/utils/google-benchmark/src/sleep.cc
index 54aa04a4224..1512ac90f7e 100644
--- a/lib/libcxx/utils/google-benchmark/src/sleep.cc
+++ b/lib/libcxx/utils/google-benchmark/src/sleep.cc
@@ -21,7 +21,7 @@
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
-#include <Windows.h>
+#include <windows.h>
#endif
namespace benchmark {
diff --git a/lib/libcxx/utils/google-benchmark/src/statistics.cc b/lib/libcxx/utils/google-benchmark/src/statistics.cc
index 612dda2d1a7..e821aec18b7 100644
--- a/lib/libcxx/utils/google-benchmark/src/statistics.cc
+++ b/lib/libcxx/utils/google-benchmark/src/statistics.cc
@@ -91,13 +91,9 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
// Accumulators.
std::vector<double> real_accumulated_time_stat;
std::vector<double> cpu_accumulated_time_stat;
- std::vector<double> bytes_per_second_stat;
- std::vector<double> items_per_second_stat;
real_accumulated_time_stat.reserve(reports.size());
cpu_accumulated_time_stat.reserve(reports.size());
- bytes_per_second_stat.reserve(reports.size());
- items_per_second_stat.reserve(reports.size());
// All repetitions should be run with the same number of iterations so we
// can take this information from the first benchmark.
@@ -123,13 +119,11 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
// Populate the accumulators.
for (Run const& run : reports) {
- CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
+ CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
CHECK_EQ(run_iterations, run.iterations);
if (run.error_occurred) continue;
real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
- items_per_second_stat.emplace_back(run.items_per_second);
- bytes_per_second_stat.emplace_back(run.bytes_per_second);
// user counters
for (auto const& cnt : run.counters) {
auto it = counter_stats.find(cnt.first);
@@ -147,24 +141,43 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
}
}
+ const double iteration_rescale_factor =
+ double(reports.size()) / double(run_iterations);
+
for (const auto& Stat : *reports[0].statistics) {
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run data;
- data.benchmark_name = reports[0].benchmark_name + "_" + Stat.name_;
+ data.run_name = reports[0].benchmark_name();
+ data.run_type = BenchmarkReporter::Run::RT_Aggregate;
+ data.aggregate_name = Stat.name_;
data.report_label = report_label;
- data.iterations = run_iterations;
+
+    // It is incorrect to say that an aggregate is computed over
+    // a run's iterations, because those iterations were already averaged.
+    // Similarly, if there are N repetitions with 1 iteration each,
+    // an aggregate is computed over N measurements, not 1.
+    // Thus it is best to simply use the count of separate reports.
+ data.iterations = reports.size();
data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
- data.bytes_per_second = Stat.compute_(bytes_per_second_stat);
- data.items_per_second = Stat.compute_(items_per_second_stat);
+
+    // We will divide these times by data.iterations when reporting, but
+    // data.iterations is not necessarily the scale of these measurements,
+    // because in each repetition these timers are summed over all iterations.
+    // And if we want to say that the stats are over N repetitions and not
+    // M iterations, we need to multiply these by (N/M).
+ data.real_accumulated_time *= iteration_rescale_factor;
+ data.cpu_accumulated_time *= iteration_rescale_factor;
data.time_unit = reports[0].time_unit;
// user counters
for (auto const& kv : counter_stats) {
+ // Do *NOT* rescale the custom counters. They are already properly scaled.
const auto uc_stat = Stat.compute_(kv.second.s);
- auto c = Counter(uc_stat, counter_stats[kv.first].c.flags);
+ auto c = Counter(uc_stat, counter_stats[kv.first].c.flags,
+ counter_stats[kv.first].c.oneK);
data.counters[kv.first] = c;
}
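A worked example of the rescale above, as a sketch with made-up numbers: 3 repetitions of 1000 iterations each, with 2.0s of accumulated CPU time per repetition (2ms per iteration).

#include <cstdio>
#include <vector>

int main() {
  // Hypothetical numbers: 3 repetitions, 1000 iterations each.
  const std::vector<double> cpu_accumulated = {2.0, 2.0, 2.0};
  const double run_iterations = 1000.0;
  const double rescale = cpu_accumulated.size() / run_iterations;  // 3/1000

  double mean_accumulated = 0;
  for (double t : cpu_accumulated) mean_accumulated += t;
  mean_accumulated /= cpu_accumulated.size();  // 2.0, a sum over 1000 iterations
  mean_accumulated *= rescale;                 // 0.006, now scaled to 3 "iterations"

  // The reporter divides accumulated time by iterations (= 3 for the aggregate),
  // recovering the 2ms-per-iteration figure.
  std::printf("%g s/iter\n", mean_accumulated / 3.0);  // 0.002
}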
diff --git a/lib/libcxx/utils/google-benchmark/src/string_util.h b/lib/libcxx/utils/google-benchmark/src/string_util.h
index 4a5501273cf..fc5f8b0304b 100644
--- a/lib/libcxx/utils/google-benchmark/src/string_util.h
+++ b/lib/libcxx/utils/google-benchmark/src/string_util.h
@@ -12,7 +12,11 @@ void AppendHumanReadable(int n, std::string* str);
std::string HumanReadableNumber(double n, double one_k = 1024.0);
-std::string StrFormat(const char* format, ...);
+#ifdef __GNUC__
+__attribute__((format(printf, 1, 2)))
+#endif
+std::string
+StrFormat(const char* format, ...);
inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
return out;
diff --git a/lib/libcxx/utils/google-benchmark/src/sysinfo.cc b/lib/libcxx/utils/google-benchmark/src/sysinfo.cc
index 73064b97ba2..c0c07e5e62a 100644
--- a/lib/libcxx/utils/google-benchmark/src/sysinfo.cc
+++ b/lib/libcxx/utils/google-benchmark/src/sysinfo.cc
@@ -15,10 +15,11 @@
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
-#include <Shlwapi.h>
+#include <shlwapi.h>
#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
-#include <VersionHelpers.h>
-#include <Windows.h>
+#include <versionhelpers.h>
+#include <windows.h>
+#include <codecvt>
#else
#include <fcntl.h>
#ifndef BENCHMARK_OS_FUCHSIA
@@ -52,6 +53,7 @@
#include <limits>
#include <memory>
#include <sstream>
+#include <locale>
#include "check.h"
#include "cycleclock.h"
@@ -288,7 +290,7 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX() {
std::string name;
std::string type;
int level;
- size_t num_sharing;
+ uint64_t num_sharing;
} Cases[] = {{"hw.l1dcachesize", "Data", 1, CacheCounts[1]},
{"hw.l1icachesize", "Instruction", 1, CacheCounts[1]},
{"hw.l2cachesize", "Unified", 2, CacheCounts[2]},
@@ -366,6 +368,35 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizes() {
#endif
}
+std::string GetSystemName() {
+#if defined(BENCHMARK_OS_WINDOWS)
+ std::string str;
+ const unsigned COUNT = MAX_COMPUTERNAME_LENGTH+1;
+ TCHAR hostname[COUNT] = {'\0'};
+ DWORD DWCOUNT = COUNT;
+ if (!GetComputerName(hostname, &DWCOUNT))
+ return std::string("");
+#ifndef UNICODE
+ str = std::string(hostname, DWCOUNT);
+#else
+  // Using wstring_convert, which is deprecated in C++17.
+ using convert_type = std::codecvt_utf8<wchar_t>;
+ std::wstring_convert<convert_type, wchar_t> converter;
+ std::wstring wStr(hostname, DWCOUNT);
+ str = converter.to_bytes(wStr);
+#endif
+ return str;
+#else // defined(BENCHMARK_OS_WINDOWS)
+#ifdef BENCHMARK_OS_MACOSX  // macOS does not have HOST_NAME_MAX defined
+#define HOST_NAME_MAX 64
+#endif
+ char hostname[HOST_NAME_MAX];
+ int retVal = gethostname(hostname, HOST_NAME_MAX);
+ if (retVal != 0) return std::string("");
+ return std::string(hostname);
+#endif // Catch-all POSIX block.
+}
+
int GetNumCPUs() {
#ifdef BENCHMARK_HAS_SYSCTL
int NumCPU = -1;
@@ -404,7 +435,13 @@ int GetNumCPUs() {
if (ln.empty()) continue;
size_t SplitIdx = ln.find(':');
std::string value;
+#if defined(__s390__)
+ // s390 has another format in /proc/cpuinfo
+ // it needs to be parsed differently
+ if (SplitIdx != std::string::npos) value = ln.substr(Key.size()+1,SplitIdx-Key.size()-1);
+#else
if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
+#endif
if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0) {
NumCPUs++;
if (!value.empty()) {
@@ -571,6 +608,24 @@ double GetCPUCyclesPerSecond() {
return static_cast<double>(cycleclock::Now() - start_ticks);
}
+std::vector<double> GetLoadAvg() {
+#if defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \
+ defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \
+ defined BENCHMARK_OS_OPENBSD
+ constexpr int kMaxSamples = 3;
+ std::vector<double> res(kMaxSamples, 0.0);
+ const int nelem = getloadavg(res.data(), kMaxSamples);
+ if (nelem < 1) {
+ res.clear();
+ } else {
+ res.resize(nelem);
+ }
+ return res;
+#else
+ return {};
+#endif
+}
+
} // end namespace
const CPUInfo& CPUInfo::Get() {
@@ -582,6 +637,14 @@ CPUInfo::CPUInfo()
: num_cpus(GetNumCPUs()),
cycles_per_second(GetCPUCyclesPerSecond()),
caches(GetCacheSizes()),
- scaling_enabled(CpuScalingEnabled(num_cpus)) {}
+ scaling_enabled(CpuScalingEnabled(num_cpus)),
+ load_avg(GetLoadAvg()) {}
+
+
+const SystemInfo& SystemInfo::Get() {
+ static const SystemInfo* info = new SystemInfo();
+ return *info;
+}
+SystemInfo::SystemInfo() : name(GetSystemName()) {}
} // end namespace benchmark
diff --git a/lib/libcxx/utils/google-benchmark/src/thread_manager.h b/lib/libcxx/utils/google-benchmark/src/thread_manager.h
index 82b4d72b62f..6e274c7ea6b 100644
--- a/lib/libcxx/utils/google-benchmark/src/thread_manager.h
+++ b/lib/libcxx/utils/google-benchmark/src/thread_manager.h
@@ -42,8 +42,6 @@ class ThreadManager {
double real_time_used = 0;
double cpu_time_used = 0;
double manual_time_used = 0;
- int64_t bytes_processed = 0;
- int64_t items_processed = 0;
int64_t complexity_n = 0;
std::string report_label_;
std::string error_message_;
diff --git a/lib/libcxx/utils/google-benchmark/src/timers.cc b/lib/libcxx/utils/google-benchmark/src/timers.cc
index 2010e2450b4..7613ff92c6e 100644
--- a/lib/libcxx/utils/google-benchmark/src/timers.cc
+++ b/lib/libcxx/utils/google-benchmark/src/timers.cc
@@ -16,10 +16,10 @@
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
-#include <Shlwapi.h>
+#include <shlwapi.h>
#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
-#include <VersionHelpers.h>
-#include <Windows.h>
+#include <versionhelpers.h>
+#include <windows.h>
#else
#include <fcntl.h>
#ifndef BENCHMARK_OS_FUCHSIA
diff --git a/lib/libcxx/utils/google-benchmark/test/AssemblyTests.cmake b/lib/libcxx/utils/google-benchmark/test/AssemblyTests.cmake
new file mode 100644
index 00000000000..3d078586f1d
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/test/AssemblyTests.cmake
@@ -0,0 +1,46 @@
+
+include(split_list)
+
+set(ASM_TEST_FLAGS "")
+check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
+if (BENCHMARK_HAS_O3_FLAG)
+ list(APPEND ASM_TEST_FLAGS -O3)
+endif()
+
+check_cxx_compiler_flag(-g0 BENCHMARK_HAS_G0_FLAG)
+if (BENCHMARK_HAS_G0_FLAG)
+ list(APPEND ASM_TEST_FLAGS -g0)
+endif()
+
+check_cxx_compiler_flag(-fno-stack-protector BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG)
+if (BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG)
+ list(APPEND ASM_TEST_FLAGS -fno-stack-protector)
+endif()
+
+split_list(ASM_TEST_FLAGS)
+string(TOUPPER "${CMAKE_CXX_COMPILER_ID}" ASM_TEST_COMPILER)
+
+macro(add_filecheck_test name)
+ cmake_parse_arguments(ARG "" "" "CHECK_PREFIXES" ${ARGV})
+ add_library(${name} OBJECT ${name}.cc)
+ set_target_properties(${name} PROPERTIES COMPILE_FLAGS "-S ${ASM_TEST_FLAGS}")
+ set(ASM_OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/${name}.s")
+ add_custom_target(copy_${name} ALL
+ COMMAND ${PROJECT_SOURCE_DIR}/tools/strip_asm.py
+ $<TARGET_OBJECTS:${name}>
+ ${ASM_OUTPUT_FILE}
+ BYPRODUCTS ${ASM_OUTPUT_FILE})
+ add_dependencies(copy_${name} ${name})
+ if (NOT ARG_CHECK_PREFIXES)
+ set(ARG_CHECK_PREFIXES "CHECK")
+ endif()
+ foreach(prefix ${ARG_CHECK_PREFIXES})
+ add_test(NAME run_${name}_${prefix}
+ COMMAND
+ ${LLVM_FILECHECK_EXE} ${name}.cc
+ --input-file=${ASM_OUTPUT_FILE}
+ --check-prefixes=CHECK,CHECK-${ASM_TEST_COMPILER}
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+ endforeach()
+endmacro()
+
diff --git a/lib/libcxx/utils/google-benchmark/test/CMakeLists.txt b/lib/libcxx/utils/google-benchmark/test/CMakeLists.txt
index f49ca5148f4..f15ce208189 100644
--- a/lib/libcxx/utils/google-benchmark/test/CMakeLists.txt
+++ b/lib/libcxx/utils/google-benchmark/test/CMakeLists.txt
@@ -125,9 +125,21 @@ add_test(templated_fixture_test templated_fixture_test --benchmark_min_time=0.01
compile_output_test(user_counters_test)
add_test(user_counters_test user_counters_test --benchmark_min_time=0.01)
+compile_output_test(report_aggregates_only_test)
+add_test(report_aggregates_only_test report_aggregates_only_test --benchmark_min_time=0.01)
+
+compile_output_test(display_aggregates_only_test)
+add_test(display_aggregates_only_test display_aggregates_only_test --benchmark_min_time=0.01)
+
compile_output_test(user_counters_tabular_test)
add_test(user_counters_tabular_test user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01)
+compile_output_test(user_counters_thousands_test)
+add_test(user_counters_thousands_test user_counters_thousands_test --benchmark_min_time=0.01)
+
+compile_output_test(memory_manager_test)
+add_test(memory_manager_test memory_manager_test --benchmark_min_time=0.01)
+
check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG)
if (BENCHMARK_HAS_CXX03_FLAG)
compile_benchmark_test(cxx03_test)
diff --git a/lib/libcxx/utils/google-benchmark/test/complexity_test.cc b/lib/libcxx/utils/google-benchmark/test/complexity_test.cc
index 5f91660898b..323ddfe7ac5 100644
--- a/lib/libcxx/utils/google-benchmark/test/complexity_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/complexity_test.cc
@@ -12,9 +12,10 @@ namespace {
#define ADD_COMPLEXITY_CASES(...) \
int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)
-int AddComplexityTest(std::string big_o_test_name, std::string rms_test_name,
- std::string big_o) {
- SetSubstitutions({{"%bigo_name", big_o_test_name},
+int AddComplexityTest(std::string test_name, std::string big_o_test_name,
+ std::string rms_test_name, std::string big_o) {
+ SetSubstitutions({{"%name", test_name},
+ {"%bigo_name", big_o_test_name},
{"%rms_name", rms_test_name},
{"%bigo_str", "[ ]* %float " + big_o},
{"%bigo", big_o},
@@ -25,12 +26,18 @@ int AddComplexityTest(std::string big_o_test_name, std::string rms_test_name,
{"^%bigo_name", MR_Not}, // Assert we we didn't only matched a name.
{"^%rms_name %rms %rms[ ]*$", MR_Next}});
AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"},
+ {"\"run_name\": \"%name\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"BigO\",$", MR_Next},
{"\"cpu_coefficient\": %float,$", MR_Next},
{"\"real_coefficient\": %float,$", MR_Next},
{"\"big_o\": \"%bigo\",$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next},
{"\"name\": \"%rms_name\",$"},
+ {"\"run_name\": \"%name\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"RMS\",$", MR_Next},
{"\"rms\": %float$", MR_Next},
{"}", MR_Next}});
AddCases(TC_CSVOut, {{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"},
@@ -59,6 +66,7 @@ BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity([](int64_t) {
return 1.0;
});
+const char *one_test_name = "BM_Complexity_O1";
const char *big_o_1_test_name = "BM_Complexity_O1_BigO";
const char *rms_o_1_test_name = "BM_Complexity_O1_RMS";
const char *enum_big_o_1 = "\\([0-9]+\\)";
@@ -69,13 +77,16 @@ const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)";
const char *lambda_big_o_1 = "f\\(N\\)";
// Add enum tests
-ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, enum_big_o_1);
+ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
+ enum_big_o_1);
// Add auto enum tests
-ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, auto_big_o_1);
+ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
+ auto_big_o_1);
// Add lambda tests
-ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, lambda_big_o_1);
+ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
+ lambda_big_o_1);
// ========================================================================= //
// --------------------------- Testing BigO O(N) --------------------------- //
@@ -112,16 +123,19 @@ BENCHMARK(BM_Complexity_O_N)
->Range(1 << 10, 1 << 16)
->Complexity();
+const char *n_test_name = "BM_Complexity_O_N";
const char *big_o_n_test_name = "BM_Complexity_O_N_BigO";
const char *rms_o_n_test_name = "BM_Complexity_O_N_RMS";
const char *enum_auto_big_o_n = "N";
const char *lambda_big_o_n = "f\\(N\\)";
// Add enum tests
-ADD_COMPLEXITY_CASES(big_o_n_test_name, rms_o_n_test_name, enum_auto_big_o_n);
+ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
+ enum_auto_big_o_n);
// Add lambda tests
-ADD_COMPLEXITY_CASES(big_o_n_test_name, rms_o_n_test_name, lambda_big_o_n);
+ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
+ lambda_big_o_n);
// ========================================================================= //
// ------------------------- Testing BigO O(N*lgN) ------------------------- //
@@ -148,18 +162,19 @@ BENCHMARK(BM_Complexity_O_N_log_N)
->Range(1 << 10, 1 << 16)
->Complexity();
+const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N";
const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";
const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS";
const char *enum_auto_big_o_n_lg_n = "NlgN";
const char *lambda_big_o_n_lg_n = "f\\(N\\)";
// Add enum tests
-ADD_COMPLEXITY_CASES(big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name,
- enum_auto_big_o_n_lg_n);
+ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
+ rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n);
// Add lambda tests
-ADD_COMPLEXITY_CASES(big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name,
- lambda_big_o_n_lg_n);
+ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
+ rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n);
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
diff --git a/lib/libcxx/utils/google-benchmark/test/display_aggregates_only_test.cc b/lib/libcxx/utils/google-benchmark/test/display_aggregates_only_test.cc
new file mode 100644
index 00000000000..3c36d3f03c1
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/test/display_aggregates_only_test.cc
@@ -0,0 +1,43 @@
+
+#undef NDEBUG
+#include <cstdio>
+#include <string>
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+// Ok this test is super ugly. We want to check what happens with the file
+// reporter in the presence of DisplayAggregatesOnly().
+// We do not care about console output; the normal tests check that already.
+
+void BM_SummaryRepeat(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->DisplayAggregatesOnly();
+
+int main(int argc, char* argv[]) {
+ const std::string output = GetFileReporterOutput(argc, argv);
+
+ if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 6 ||
+ SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3\"") != 3 ||
+ SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
+ SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") !=
+ 1 ||
+ SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") !=
+ 1) {
+ std::cout << "Precondition mismatch. Expected to only find 6 "
+ "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
+ "\"name\": \"BM_SummaryRepeat/repeats:3\", "
+ "\"name\": \"BM_SummaryRepeat/repeats:3\", "
+ "\"name\": \"BM_SummaryRepeat/repeats:3\", "
+ "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "
+ "\"name\": \"BM_SummaryRepeat/repeats:3_median\", "
+ "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire "
+ "output:\n";
+ std::cout << output;
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/lib/libcxx/utils/google-benchmark/test/memory_manager_test.cc b/lib/libcxx/utils/google-benchmark/test/memory_manager_test.cc
new file mode 100644
index 00000000000..94be6083795
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/test/memory_manager_test.cc
@@ -0,0 +1,42 @@
+#include <memory>
+
+#include "../src/check.h"
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+class TestMemoryManager : public benchmark::MemoryManager {
+ void Start() {}
+ void Stop(Result* result) {
+ result->num_allocs = 42;
+ result->max_bytes_used = 42000;
+ }
+};
+
+void BM_empty(benchmark::State& state) {
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(state.iterations());
+ }
+}
+BENCHMARK(BM_empty);
+
+ADD_CASES(TC_ConsoleOut, {{"^BM_empty %console_report$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_empty\",$"},
+ {"\"run_name\": \"BM_empty\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"allocs_per_iter\": %float,$", MR_Next},
+ {"\"max_bytes_used\": 42000$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_empty\",%csv_report$"}});
+
+
+int main(int argc, char *argv[]) {
+ std::unique_ptr<benchmark::MemoryManager> mm(new TestMemoryManager());
+
+ benchmark::RegisterMemoryManager(mm.get());
+ RunOutputTests(argc, argv);
+ benchmark::RegisterMemoryManager(nullptr);
+}
diff --git a/lib/libcxx/utils/google-benchmark/test/output_test.h b/lib/libcxx/utils/google-benchmark/test/output_test.h
index 31a919991f7..9385761b214 100644
--- a/lib/libcxx/utils/google-benchmark/test/output_test.h
+++ b/lib/libcxx/utils/google-benchmark/test/output_test.h
@@ -60,6 +60,13 @@ int SetSubstitutions(
// Run all output tests.
void RunOutputTests(int argc, char* argv[]);
+// Count the number of 'pat' substrings in the 'haystack' string.
+int SubstrCnt(const std::string& haystack, const std::string& pat);
+
+// Run registered benchmarks with file reporter enabled, and return the content
+// outputted by the file reporter.
+std::string GetFileReporterOutput(int argc, char* argv[]);
+
// ========================================================================= //
// ------------------------- Results checking ------------------------------ //
// ========================================================================= //
diff --git a/lib/libcxx/utils/google-benchmark/test/output_test_helper.cc b/lib/libcxx/utils/google-benchmark/test/output_test_helper.cc
index 394c4f5d1a2..5dc951d2bca 100644
--- a/lib/libcxx/utils/google-benchmark/test/output_test_helper.cc
+++ b/lib/libcxx/utils/google-benchmark/test/output_test_helper.cc
@@ -1,8 +1,12 @@
+#include <cstdio>
#include <cstring>
+#include <fstream>
#include <iostream>
#include <map>
#include <memory>
+#include <random>
#include <sstream>
+#include <streambuf>
#include "../src/benchmark_api_internal.h"
#include "../src/check.h" // NOTE: check.h is for internal use only!
@@ -35,15 +39,18 @@ SubMap& GetSubstitutions() {
// Don't use 'dec_re' from header because it may not yet be initialized.
// clang-format off
static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
+ static std::string time_re = "([0-9]+[.])?[0-9]+";
static SubMap map = {
{"%float", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"},
// human-readable float
{"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kMGTPEZYmunpfazy]?"},
{"%int", "[ ]*[0-9]+"},
{" %s ", "[ ]+"},
- {"%time", "[ ]*[0-9]{1,6} ns"},
- {"%console_report", "[ ]*[0-9]{1,6} ns [ ]*[0-9]{1,6} ns [ ]*[0-9]+"},
- {"%console_us_report", "[ ]*[0-9] us [ ]*[0-9] us [ ]*[0-9]+"},
+ {"%time", "[ ]*" + time_re + "[ ]+ns"},
+ {"%console_report", "[ ]*" + time_re + "[ ]+ns [ ]*" + time_re + "[ ]+ns [ ]*[0-9]+"},
+ {"%console_time_only_report", "[ ]*" + time_re + "[ ]+ns [ ]*" + time_re + "[ ]+ns"},
+ {"%console_us_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us [ ]*[0-9]+"},
+ {"%console_us_time_only_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us"},
{"%csv_header",
"name,iterations,real_time,cpu_time,time_unit,bytes_per_second,"
"items_per_second,label,error_occurred,error_message"},
@@ -202,7 +209,7 @@ void ResultsChecker::Add(const std::string& entry_pattern, ResultsCheckFn fn) {
void ResultsChecker::CheckResults(std::stringstream& output) {
// first reset the stream to the start
{
- auto start = std::ios::streampos(0);
+ auto start = std::stringstream::pos_type(0);
// clear before calling tellg()
output.clear();
// seek to zero only when needed
@@ -423,3 +430,76 @@ void RunOutputTests(int argc, char* argv[]) {
CHECK(std::strcmp(csv.name, "CSVReporter") == 0);
internal::GetResultsChecker().CheckResults(csv.out_stream);
}
+
+int SubstrCnt(const std::string& haystack, const std::string& pat) {
+ if (pat.length() == 0) return 0;
+ int count = 0;
+ for (size_t offset = haystack.find(pat); offset != std::string::npos;
+ offset = haystack.find(pat, offset + pat.length()))
+ ++count;
+ return count;
+}
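Note that the helper counts non-overlapping occurrences, since the search resumes at offset + pat.length(). A small sketch of how the *_aggregates_only tests elsewhere in this import rely on that:

#include <cassert>
#include <string>

// Same logic as SubstrCnt above: count non-overlapping occurrences.
static int CountSubstr(const std::string& haystack, const std::string& pat) {
  if (pat.empty()) return 0;
  int count = 0;
  for (size_t off = haystack.find(pat); off != std::string::npos;
       off = haystack.find(pat, off + pat.length()))
    ++count;
  return count;
}

int main() {
  assert(CountSubstr("aaaa", "aa") == 2);  // non-overlapping
  const std::string json = "\"name\": \"BM_X/repeats:3\",\n"
                           "\"name\": \"BM_X/repeats:3_mean\",\n";
  assert(CountSubstr(json, "\"name\": \"BM_X/repeats:3") == 2);   // prefix match
  assert(CountSubstr(json, "\"name\": \"BM_X/repeats:3\"") == 1); // exact name
}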
+
+static char ToHex(int ch) {
+ return ch < 10 ? static_cast<char>('0' + ch)
+ : static_cast<char>('a' + (ch - 10));
+}
+
+static char RandomHexChar() {
+ static std::mt19937 rd{std::random_device{}()};
+ static std::uniform_int_distribution<int> mrand{0, 15};
+ return ToHex(mrand(rd));
+}
+
+static std::string GetRandomFileName() {
+ std::string model = "test.%%%%%%";
+ for (auto & ch : model) {
+ if (ch == '%')
+ ch = RandomHexChar();
+ }
+ return model;
+}
+
+static bool FileExists(std::string const& name) {
+ std::ifstream in(name.c_str());
+ return in.good();
+}
+
+static std::string GetTempFileName() {
+ // This function attempts to avoid race conditions where two tests
+ // create the same file at the same time. However, it still introduces races
+ // similar to tmpnam.
+ int retries = 3;
+ while (--retries) {
+ std::string name = GetRandomFileName();
+ if (!FileExists(name))
+ return name;
+ }
+ std::cerr << "Failed to create unique temporary file name" << std::endl;
+ std::abort();
+}
+
+std::string GetFileReporterOutput(int argc, char* argv[]) {
+ std::vector<char*> new_argv(argv, argv + argc);
+ assert(static_cast<decltype(new_argv)::size_type>(argc) == new_argv.size());
+
+ std::string tmp_file_name = GetTempFileName();
+ std::cout << "Will be using this as the tmp file: " << tmp_file_name << '\n';
+
+ std::string tmp = "--benchmark_out=";
+ tmp += tmp_file_name;
+ new_argv.emplace_back(const_cast<char*>(tmp.c_str()));
+
+ argc = int(new_argv.size());
+
+ benchmark::Initialize(&argc, new_argv.data());
+ benchmark::RunSpecifiedBenchmarks();
+
+ // Read the output back from the file, and delete the file.
+ std::ifstream tmp_stream(tmp_file_name);
+ std::string output = std::string((std::istreambuf_iterator<char>(tmp_stream)),
+ std::istreambuf_iterator<char>());
+ std::remove(tmp_file_name.c_str());
+
+ return output;
+}
diff --git a/lib/libcxx/utils/google-benchmark/test/register_benchmark_test.cc b/lib/libcxx/utils/google-benchmark/test/register_benchmark_test.cc
index 18de6d68e21..3ac5b21fb34 100644
--- a/lib/libcxx/utils/google-benchmark/test/register_benchmark_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/register_benchmark_test.cc
@@ -30,8 +30,8 @@ struct TestCase {
void CheckRun(Run const& run) const {
// clang-format off
- CHECK(name == run.benchmark_name) << "expected " << name << " got "
- << run.benchmark_name;
+ CHECK(name == run.benchmark_name()) << "expected " << name << " got "
+ << run.benchmark_name();
if (label) {
CHECK(run.report_label == label) << "expected " << label << " got "
<< run.report_label;
diff --git a/lib/libcxx/utils/google-benchmark/test/report_aggregates_only_test.cc b/lib/libcxx/utils/google-benchmark/test/report_aggregates_only_test.cc
new file mode 100644
index 00000000000..9646b9be534
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/test/report_aggregates_only_test.cc
@@ -0,0 +1,39 @@
+
+#undef NDEBUG
+#include <cstdio>
+#include <string>
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+// Ok this test is super ugly. We want to check what happens with the file
+// reporter in the presence of ReportAggregatesOnly().
+// We do not care about console output; the normal tests check that already.
+
+void BM_SummaryRepeat(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
+
+int main(int argc, char* argv[]) {
+ const std::string output = GetFileReporterOutput(argc, argv);
+
+ if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 3 ||
+ SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
+ SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") !=
+ 1 ||
+ SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") !=
+ 1) {
+ std::cout << "Precondition mismatch. Expected to only find three "
+ "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
+ "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "
+ "\"name\": \"BM_SummaryRepeat/repeats:3_median\", "
+ "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire "
+ "output:\n";
+ std::cout << output;
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/lib/libcxx/utils/google-benchmark/test/reporter_output_test.cc b/lib/libcxx/utils/google-benchmark/test/reporter_output_test.cc
index 1662fcb8b54..ec6d51b3591 100644
--- a/lib/libcxx/utils/google-benchmark/test/reporter_output_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/reporter_output_test.cc
@@ -17,18 +17,21 @@ static int AddContextCases() {
{
{"%int[-/]%int[-/]%int %int:%int:%int$", MR_Default},
{"Running .*/reporter_output_test(\\.exe)?$", MR_Next},
- {"Run on \\(%int X %float MHz CPU s\\)", MR_Next},
+ {"Run on \\(%int X %float MHz CPU s?\\)", MR_Next},
});
AddCases(TC_JSONOut,
{{"^\\{", MR_Default},
{"\"context\":", MR_Next},
{"\"date\": \"", MR_Next},
- {"\"executable\": \".*/reporter_output_test(\\.exe)?\",", MR_Next},
+ {"\"host_name\":", MR_Next},
+ {"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",",
+ MR_Next},
{"\"num_cpus\": %int,$", MR_Next},
{"\"mhz_per_cpu\": %float,$", MR_Next},
{"\"cpu_scaling_enabled\": ", MR_Next},
{"\"caches\": \\[$", MR_Next}});
- auto const& Caches = benchmark::CPUInfo::Get().caches;
+ auto const& Info = benchmark::CPUInfo::Get();
+ auto const& Caches = Info.caches;
if (!Caches.empty()) {
AddCases(TC_ConsoleErr, {{"CPU Caches:$", MR_Next}});
}
@@ -45,8 +48,13 @@ static int AddContextCases() {
{"\"num_sharing\": %int$", MR_Next},
{"}[,]{0,1}$", MR_Next}});
}
-
AddCases(TC_JSONOut, {{"],$"}});
+ auto const& LoadAvg = Info.load_avg;
+ if (!LoadAvg.empty()) {
+ AddCases(TC_ConsoleErr,
+ {{"Load Average: (%float, ){0,2}%float$", MR_Next}});
+ }
+ AddCases(TC_JSONOut, {{"\"load_avg\": \\[(%float,?){0,3}],$", MR_Next}});
return 0;
}
int dummy_register = AddContextCases();
@@ -64,6 +72,8 @@ BENCHMARK(BM_basic);
ADD_CASES(TC_ConsoleOut, {{"^BM_basic %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_basic\",$"},
+ {"\"run_name\": \"BM_basic\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -82,9 +92,11 @@ void BM_bytes_per_second(benchmark::State& state) {
}
BENCHMARK(BM_bytes_per_second);
-ADD_CASES(TC_ConsoleOut,
- {{"^BM_bytes_per_second %console_report +%float[kM]{0,1}B/s$"}});
+ADD_CASES(TC_ConsoleOut, {{"^BM_bytes_per_second %console_report "
+ "bytes_per_second=%float[kM]{0,1}/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_bytes_per_second\",$"},
+ {"\"run_name\": \"BM_bytes_per_second\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -104,9 +116,11 @@ void BM_items_per_second(benchmark::State& state) {
}
BENCHMARK(BM_items_per_second);
-ADD_CASES(TC_ConsoleOut,
- {{"^BM_items_per_second %console_report +%float[kM]{0,1} items/s$"}});
+ADD_CASES(TC_ConsoleOut, {{"^BM_items_per_second %console_report "
+ "items_per_second=%float[kM]{0,1}/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_items_per_second\",$"},
+ {"\"run_name\": \"BM_items_per_second\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -128,6 +142,8 @@ BENCHMARK(BM_label);
ADD_CASES(TC_ConsoleOut, {{"^BM_label %console_report some label$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_label\",$"},
+ {"\"run_name\": \"BM_label\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -149,6 +165,8 @@ void BM_error(benchmark::State& state) {
BENCHMARK(BM_error);
ADD_CASES(TC_ConsoleOut, {{"^BM_error[ ]+ERROR OCCURRED: 'message'$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_error\",$"},
+ {"\"run_name\": \"BM_error\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
{"\"error_occurred\": true,$", MR_Next},
{"\"error_message\": \"message\",$", MR_Next}});
@@ -165,7 +183,9 @@ void BM_no_arg_name(benchmark::State& state) {
}
BENCHMARK(BM_no_arg_name)->Arg(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_no_arg_name/3 %console_report$"}});
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_no_arg_name/3\",$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_no_arg_name/3\",$"},
+ {"\"run_name\": \"BM_no_arg_name/3\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}});
// ========================================================================= //
@@ -178,7 +198,9 @@ void BM_arg_name(benchmark::State& state) {
}
BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_arg_name/first:3 %console_report$"}});
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_name/first:3\",$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_name/first:3\",$"},
+ {"\"run_name\": \"BM_arg_name/first:3\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}});
// ========================================================================= //
@@ -192,10 +214,25 @@ void BM_arg_names(benchmark::State& state) {
BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"});
ADD_CASES(TC_ConsoleOut,
{{"^BM_arg_names/first:2/5/third:4 %console_report$"}});
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"},
+ {"\"run_name\": \"BM_arg_names/first:2/5/third:4\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}});
// ========================================================================= //
+// ------------------------ Testing Big Args Output ------------------------ //
+// ========================================================================= //
+
+void BM_BigArgs(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_BigArgs)->RangeMultiplier(2)->Range(1U << 30U, 1U << 31U);
+ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"},
+ {"^BM_BigArgs/2147483648 %console_report$"}});
+
+// ========================================================================= //
// ----------------------- Testing Complexity Output ----------------------- //
// ========================================================================= //
@@ -221,16 +258,33 @@ void BM_Repeat(benchmark::State& state) {
}
// need two repetitions min to be able to output any aggregate output
BENCHMARK(BM_Repeat)->Repetitions(2);
-ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:2 %console_report$"},
- {"^BM_Repeat/repeats:2 %console_report$"},
- {"^BM_Repeat/repeats:2_mean %console_report$"},
- {"^BM_Repeat/repeats:2_median %console_report$"},
- {"^BM_Repeat/repeats:2_stddev %console_report$"}});
+ADD_CASES(TC_ConsoleOut,
+ {{"^BM_Repeat/repeats:2 %console_report$"},
+ {"^BM_Repeat/repeats:2 %console_report$"},
+ {"^BM_Repeat/repeats:2_mean %console_time_only_report [ ]*2$"},
+ {"^BM_Repeat/repeats:2_median %console_time_only_report [ ]*2$"},
+ {"^BM_Repeat/repeats:2_stddev %console_time_only_report [ ]*2$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:2\",$"},
+ {"\"run_name\": \"BM_Repeat/repeats:2\"", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2\",$"},
+ {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2_mean\",$"},
+ {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2_median\",$"},
- {"\"name\": \"BM_Repeat/repeats:2_stddev\",$"}});
+ {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": 2,$", MR_Next},
+ {"\"name\": \"BM_Repeat/repeats:2_stddev\",$"},
+ {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": 2,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"},
{"^\"BM_Repeat/repeats:2\",%csv_report$"},
{"^\"BM_Repeat/repeats:2_mean\",%csv_report$"},
@@ -238,18 +292,37 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"},
{"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}});
// but for two repetitions, the mean and the median are the same, so let's repeat...
BENCHMARK(BM_Repeat)->Repetitions(3);
-ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:3 %console_report$"},
- {"^BM_Repeat/repeats:3 %console_report$"},
- {"^BM_Repeat/repeats:3 %console_report$"},
- {"^BM_Repeat/repeats:3_mean %console_report$"},
- {"^BM_Repeat/repeats:3_median %console_report$"},
- {"^BM_Repeat/repeats:3_stddev %console_report$"}});
+ADD_CASES(TC_ConsoleOut,
+ {{"^BM_Repeat/repeats:3 %console_report$"},
+ {"^BM_Repeat/repeats:3 %console_report$"},
+ {"^BM_Repeat/repeats:3 %console_report$"},
+ {"^BM_Repeat/repeats:3_mean %console_time_only_report [ ]*3$"},
+ {"^BM_Repeat/repeats:3_median %console_time_only_report [ ]*3$"},
+ {"^BM_Repeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"},
+ {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3\",$"},
+ {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3\",$"},
+ {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3_mean\",$"},
+ {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3_median\",$"},
- {"\"name\": \"BM_Repeat/repeats:3_stddev\",$"}});
+ {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"name\": \"BM_Repeat/repeats:3_stddev\",$"},
+ {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3\",%csv_report$"},
@@ -258,20 +331,41 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}});
// median differs between even/odd number of repetitions, so just to be sure
BENCHMARK(BM_Repeat)->Repetitions(4);
-ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:4 %console_report$"},
- {"^BM_Repeat/repeats:4 %console_report$"},
- {"^BM_Repeat/repeats:4 %console_report$"},
- {"^BM_Repeat/repeats:4 %console_report$"},
- {"^BM_Repeat/repeats:4_mean %console_report$"},
- {"^BM_Repeat/repeats:4_median %console_report$"},
- {"^BM_Repeat/repeats:4_stddev %console_report$"}});
+ADD_CASES(TC_ConsoleOut,
+ {{"^BM_Repeat/repeats:4 %console_report$"},
+ {"^BM_Repeat/repeats:4 %console_report$"},
+ {"^BM_Repeat/repeats:4 %console_report$"},
+ {"^BM_Repeat/repeats:4 %console_report$"},
+ {"^BM_Repeat/repeats:4_mean %console_time_only_report [ ]*4$"},
+ {"^BM_Repeat/repeats:4_median %console_time_only_report [ ]*4$"},
+ {"^BM_Repeat/repeats:4_stddev %console_time_only_report [ ]*4$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"},
+ {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4\",$"},
+ {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4\",$"},
+ {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4\",$"},
+ {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4_mean\",$"},
+ {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": 4,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4_median\",$"},
- {"\"name\": \"BM_Repeat/repeats:4_stddev\",$"}});
+ {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": 4,$", MR_Next},
+ {"\"name\": \"BM_Repeat/repeats:4_stddev\",$"},
+ {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": 4,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"},
{"^\"BM_Repeat/repeats:4\",%csv_report$"},
{"^\"BM_Repeat/repeats:4\",%csv_report$"},
@@ -288,7 +382,9 @@ void BM_RepeatOnce(benchmark::State& state) {
}
BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly();
ADD_CASES(TC_ConsoleOut, {{"^BM_RepeatOnce/repeats:1 %console_report$"}});
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"},
+ {"\"run_name\": \"BM_RepeatOnce/repeats:1\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}});
// Test that non-aggregate data is not reported
@@ -297,20 +393,72 @@ void BM_SummaryRepeat(benchmark::State& state) {
}
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
-ADD_CASES(TC_ConsoleOut,
+ADD_CASES(
+ TC_ConsoleOut,
+ {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
+ {"^BM_SummaryRepeat/repeats:3_mean %console_time_only_report [ ]*3$"},
+ {"^BM_SummaryRepeat/repeats:3_median %console_time_only_report [ ]*3$"},
+ {"^BM_SummaryRepeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
+ADD_CASES(TC_JSONOut,
{{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
- {"^BM_SummaryRepeat/repeats:3_mean %console_report$"},
- {"^BM_SummaryRepeat/repeats:3_median %console_report$"},
- {"^BM_SummaryRepeat/repeats:3_stddev %console_report$"}});
-ADD_CASES(TC_JSONOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
- {"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"},
- {"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"},
- {"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"}});
+ {"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"},
+ {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"},
+ {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"},
+ {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"},
{"^\"BM_SummaryRepeat/repeats:3_median\",%csv_report$"},
{"^\"BM_SummaryRepeat/repeats:3_stddev\",%csv_report$"}});
+// Test that non-aggregate data is not displayed.
+// NOTE: this test is somewhat weak; we only test the display output,
+// and we do not check that the file output still contains everything...
+void BM_SummaryDisplay(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_SummaryDisplay)->Repetitions(2)->DisplayAggregatesOnly();
+ADD_CASES(
+ TC_ConsoleOut,
+ {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
+ {"^BM_SummaryDisplay/repeats:2_mean %console_time_only_report [ ]*2$"},
+ {"^BM_SummaryDisplay/repeats:2_median %console_time_only_report [ ]*2$"},
+ {"^BM_SummaryDisplay/repeats:2_stddev %console_time_only_report [ ]*2$"}});
+ADD_CASES(TC_JSONOut,
+ {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
+ {"\"name\": \"BM_SummaryDisplay/repeats:2_mean\",$"},
+ {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": 2,$", MR_Next},
+ {"\"name\": \"BM_SummaryDisplay/repeats:2_median\",$"},
+ {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": 2,$", MR_Next},
+ {"\"name\": \"BM_SummaryDisplay/repeats:2_stddev\",$"},
+ {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": 2,$", MR_Next}});
+ADD_CASES(TC_CSVOut,
+ {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
+ {"^\"BM_SummaryDisplay/repeats:2_mean\",%csv_report$"},
+ {"^\"BM_SummaryDisplay/repeats:2_median\",%csv_report$"},
+ {"^\"BM_SummaryDisplay/repeats:2_stddev\",%csv_report$"}});
+
+// Test repeats with custom time unit.
void BM_RepeatTimeUnit(benchmark::State& state) {
for (auto _ : state) {
}
@@ -319,18 +467,34 @@ BENCHMARK(BM_RepeatTimeUnit)
->Repetitions(3)
->ReportAggregatesOnly()
->Unit(benchmark::kMicrosecond);
-ADD_CASES(TC_ConsoleOut,
+ADD_CASES(
+ TC_ConsoleOut,
+ {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
+ {"^BM_RepeatTimeUnit/repeats:3_mean %console_us_time_only_report [ ]*3$"},
+ {"^BM_RepeatTimeUnit/repeats:3_median %console_us_time_only_report [ "
+ "]*3$"},
+ {"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_time_only_report [ "
+ "]*3$"}});
+ADD_CASES(TC_JSONOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
- {"^BM_RepeatTimeUnit/repeats:3_mean %console_us_report$"},
- {"^BM_RepeatTimeUnit/repeats:3_median %console_us_report$"},
- {"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_report$"}});
-ADD_CASES(TC_JSONOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
- {"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"},
- {"\"time_unit\": \"us\",?$"},
- {"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"},
- {"\"time_unit\": \"us\",?$"},
- {"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"},
- {"\"time_unit\": \"us\",?$"}});
+ {"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"},
+ {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"time_unit\": \"us\",?$"},
+ {"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"},
+ {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"time_unit\": \"us\",?$"},
+ {"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"},
+ {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"time_unit\": \"us\",?$"}});
ADD_CASES(TC_CSVOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"},
@@ -346,37 +510,92 @@ const auto UserStatistics = [](const std::vector<double>& v) {
};
void BM_UserStats(benchmark::State& state) {
for (auto _ : state) {
+ state.SetIterationTime(150 / 10e8);
}
}
// clang-format off
BENCHMARK(BM_UserStats)
->Repetitions(3)
+ ->Iterations(5)
+ ->UseManualTime()
->ComputeStatistics("", UserStatistics);
// clang-format on
// check that user-provided stats are calculated, and come after the default ones
// empty string as name is intentional; it would sort before anything else
-ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/repeats:3 %console_report$"},
- {"^BM_UserStats/repeats:3 %console_report$"},
- {"^BM_UserStats/repeats:3 %console_report$"},
- {"^BM_UserStats/repeats:3_mean %console_report$"},
- {"^BM_UserStats/repeats:3_median %console_report$"},
- {"^BM_UserStats/repeats:3_stddev %console_report$"},
- {"^BM_UserStats/repeats:3_ %console_report$"}});
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_UserStats/repeats:3\",$"},
- {"\"name\": \"BM_UserStats/repeats:3\",$"},
- {"\"name\": \"BM_UserStats/repeats:3\",$"},
- {"\"name\": \"BM_UserStats/repeats:3_mean\",$"},
- {"\"name\": \"BM_UserStats/repeats:3_median\",$"},
- {"\"name\": \"BM_UserStats/repeats:3_stddev\",$"},
- {"\"name\": \"BM_UserStats/repeats:3_\",$"}});
-ADD_CASES(TC_CSVOut, {{"^\"BM_UserStats/repeats:3\",%csv_report$"},
- {"^\"BM_UserStats/repeats:3\",%csv_report$"},
- {"^\"BM_UserStats/repeats:3\",%csv_report$"},
- {"^\"BM_UserStats/repeats:3_mean\",%csv_report$"},
- {"^\"BM_UserStats/repeats:3_median\",%csv_report$"},
- {"^\"BM_UserStats/repeats:3_stddev\",%csv_report$"},
- {"^\"BM_UserStats/repeats:3_\",%csv_report$"}});
+ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
+ "]* 150 ns %time [ ]*5$"},
+ {"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
+ "]* 150 ns %time [ ]*5$"},
+ {"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
+ "]* 150 ns %time [ ]*5$"},
+ {"^BM_UserStats/iterations:5/repeats:3/"
+ "manual_time_mean [ ]* 150 ns %time [ ]*3$"},
+ {"^BM_UserStats/iterations:5/repeats:3/"
+ "manual_time_median [ ]* 150 ns %time [ ]*3$"},
+ {"^BM_UserStats/iterations:5/repeats:3/"
+ "manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"},
+ {"^BM_UserStats/iterations:5/repeats:3/manual_time_ "
+ "[ ]* 150 ns %time [ ]*3$"}});
+ADD_CASES(
+ TC_JSONOut,
+ {{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
+ {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
+ MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"iterations\": 5,$", MR_Next},
+ {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
+ {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
+ {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
+ MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"iterations\": 5,$", MR_Next},
+ {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
+ {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
+ {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
+ MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"iterations\": 5,$", MR_Next},
+ {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
+ {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",$"},
+ {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
+ MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
+ {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"},
+ {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
+ MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
+ {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"},
+ {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
+ MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"},
+ {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
+ MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"\",$", MR_Next},
+ {"\"iterations\": 3,$", MR_Next},
+ {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}});
+ADD_CASES(
+ TC_CSVOut,
+ {{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
+ {"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
+ {"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
+ {"^\"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",%csv_report$"},
+ {"^\"BM_UserStats/iterations:5/repeats:3/"
+ "manual_time_median\",%csv_report$"},
+ {"^\"BM_UserStats/iterations:5/repeats:3/"
+ "manual_time_stddev\",%csv_report$"},
+ {"^\"BM_UserStats/iterations:5/repeats:3/manual_time_\",%csv_report$"}});
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
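The reworked BM_UserStats benchmark above combines Iterations(), Repetitions() and manual timing. Stripped down to just the manual-time part, the idiom looks roughly like this (the benchmark name and the timed work are illustrative only):

    #include <chrono>

    #include "benchmark/benchmark.h"

    // With UseManualTime(), the library reports whatever duration the
    // benchmark passes to SetIterationTime() for each iteration instead
    // of the wall-clock time it measures itself.
    static void BM_ManualTiming(benchmark::State& state) {
      for (auto _ : state) {
        auto start = std::chrono::high_resolution_clock::now();
        benchmark::DoNotOptimize(start);  // placeholder for the timed work
        auto end = std::chrono::high_resolution_clock::now();
        state.SetIterationTime(
            std::chrono::duration<double>(end - start).count());
      }
    }
    BENCHMARK(BM_ManualTiming)->UseManualTime();
    BENCHMARK_MAIN();
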
diff --git a/lib/libcxx/utils/google-benchmark/test/skip_with_error_test.cc b/lib/libcxx/utils/google-benchmark/test/skip_with_error_test.cc
index 39785fb7f6d..06579772ff7 100644
--- a/lib/libcxx/utils/google-benchmark/test/skip_with_error_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/skip_with_error_test.cc
@@ -33,8 +33,8 @@ struct TestCase {
typedef benchmark::BenchmarkReporter::Run Run;
void CheckRun(Run const& run) const {
- CHECK(name == run.benchmark_name)
- << "expected " << name << " got " << run.benchmark_name;
+ CHECK(name == run.benchmark_name())
+ << "expected " << name << " got " << run.benchmark_name();
CHECK(error_occurred == run.error_occurred);
CHECK(error_message == run.error_message);
if (error_occurred) {
diff --git a/lib/libcxx/utils/google-benchmark/test/string_util_gtest.cc b/lib/libcxx/utils/google-benchmark/test/string_util_gtest.cc
index 4c81734cf8a..2c5d073f613 100644
--- a/lib/libcxx/utils/google-benchmark/test/string_util_gtest.cc
+++ b/lib/libcxx/utils/google-benchmark/test/string_util_gtest.cc
@@ -9,56 +9,56 @@ namespace {
TEST(StringUtilTest, stoul) {
{
size_t pos = 0;
- EXPECT_EQ(0, benchmark::stoul("0", &pos));
- EXPECT_EQ(1, pos);
+ EXPECT_EQ(0ul, benchmark::stoul("0", &pos));
+ EXPECT_EQ(1ul, pos);
}
{
size_t pos = 0;
- EXPECT_EQ(7, benchmark::stoul("7", &pos));
- EXPECT_EQ(1, pos);
+ EXPECT_EQ(7ul, benchmark::stoul("7", &pos));
+ EXPECT_EQ(1ul, pos);
}
{
size_t pos = 0;
- EXPECT_EQ(135, benchmark::stoul("135", &pos));
- EXPECT_EQ(3, pos);
+ EXPECT_EQ(135ul, benchmark::stoul("135", &pos));
+ EXPECT_EQ(3ul, pos);
}
#if ULONG_MAX == 0xFFFFFFFFul
{
size_t pos = 0;
EXPECT_EQ(0xFFFFFFFFul, benchmark::stoul("4294967295", &pos));
- EXPECT_EQ(10, pos);
+ EXPECT_EQ(10ul, pos);
}
#elif ULONG_MAX == 0xFFFFFFFFFFFFFFFFul
{
size_t pos = 0;
EXPECT_EQ(0xFFFFFFFFFFFFFFFFul, benchmark::stoul("18446744073709551615", &pos));
- EXPECT_EQ(20, pos);
+ EXPECT_EQ(20ul, pos);
}
#endif
{
size_t pos = 0;
- EXPECT_EQ(10, benchmark::stoul("1010", &pos, 2));
- EXPECT_EQ(4, pos);
+ EXPECT_EQ(10ul, benchmark::stoul("1010", &pos, 2));
+ EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
- EXPECT_EQ(520, benchmark::stoul("1010", &pos, 8));
- EXPECT_EQ(4, pos);
+ EXPECT_EQ(520ul, benchmark::stoul("1010", &pos, 8));
+ EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
- EXPECT_EQ(1010, benchmark::stoul("1010", &pos, 10));
- EXPECT_EQ(4, pos);
+ EXPECT_EQ(1010ul, benchmark::stoul("1010", &pos, 10));
+ EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
- EXPECT_EQ(4112, benchmark::stoul("1010", &pos, 16));
- EXPECT_EQ(4, pos);
+ EXPECT_EQ(4112ul, benchmark::stoul("1010", &pos, 16));
+ EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
- EXPECT_EQ(0xBEEF, benchmark::stoul("BEEF", &pos, 16));
- EXPECT_EQ(4, pos);
+ EXPECT_EQ(0xBEEFul, benchmark::stoul("BEEF", &pos, 16));
+ EXPECT_EQ(4ul, pos);
}
{
ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument);
@@ -69,42 +69,42 @@ TEST(StringUtilTest, stoi) {
{
size_t pos = 0;
EXPECT_EQ(0, benchmark::stoi("0", &pos));
- EXPECT_EQ(1, pos);
+ EXPECT_EQ(1ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(-17, benchmark::stoi("-17", &pos));
- EXPECT_EQ(3, pos);
+ EXPECT_EQ(3ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1357, benchmark::stoi("1357", &pos));
- EXPECT_EQ(4, pos);
+ EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2));
- EXPECT_EQ(4, pos);
+ EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8));
- EXPECT_EQ(4, pos);
+ EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10));
- EXPECT_EQ(4, pos);
+ EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16));
- EXPECT_EQ(4, pos);
+ EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16));
- EXPECT_EQ(4, pos);
+ EXPECT_EQ(4ul, pos);
}
{
ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument);
@@ -115,28 +115,28 @@ TEST(StringUtilTest, stod) {
{
size_t pos = 0;
EXPECT_EQ(0.0, benchmark::stod("0", &pos));
- EXPECT_EQ(1, pos);
+ EXPECT_EQ(1ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(-84.0, benchmark::stod("-84", &pos));
- EXPECT_EQ(3, pos);
+ EXPECT_EQ(3ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1234.0, benchmark::stod("1234", &pos));
- EXPECT_EQ(4, pos);
+ EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1.5, benchmark::stod("1.5", &pos));
- EXPECT_EQ(3, pos);
+ EXPECT_EQ(3ul, pos);
}
{
size_t pos = 0;
/* Note: exactly representable as double */
EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos));
- EXPECT_EQ(8, pos);
+ EXPECT_EQ(8ul, pos);
}
{
ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument);
diff --git a/lib/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc b/lib/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc
index 4f126b6d978..030e98916c3 100644
--- a/lib/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/user_counters_tabular_test.cc
@@ -69,18 +69,21 @@ void BM_Counters_Tabular(benchmark::State& state) {
});
}
BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16);
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"Bar\": %float,$", MR_Next},
- {"\"Bat\": %float,$", MR_Next},
- {"\"Baz\": %float,$", MR_Next},
- {"\"Foo\": %float,$", MR_Next},
- {"\"Frob\": %float,$", MR_Next},
- {"\"Lob\": %float$", MR_Next},
- {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"},
+ {"\"run_name\": \"BM_Counters_Tabular/threads:%int\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bar\": %float,$", MR_Next},
+ {"\"Bat\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float,$", MR_Next},
+ {"\"Frob\": %float,$", MR_Next},
+ {"\"Lob\": %float$", MR_Next},
+ {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Tabular/threads:%int\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
@@ -113,18 +116,22 @@ void BM_CounterRates_Tabular(benchmark::State& state) {
});
}
BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16);
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"Bar\": %float,$", MR_Next},
- {"\"Bat\": %float,$", MR_Next},
- {"\"Baz\": %float,$", MR_Next},
- {"\"Foo\": %float,$", MR_Next},
- {"\"Frob\": %float,$", MR_Next},
- {"\"Lob\": %float$", MR_Next},
- {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
+ {"\"run_name\": \"BM_CounterRates_Tabular/threads:%int\",$",
+ MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bar\": %float,$", MR_Next},
+ {"\"Bat\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float,$", MR_Next},
+ {"\"Frob\": %float,$", MR_Next},
+ {"\"Lob\": %float$", MR_Next},
+ {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterRates_Tabular/threads:%int\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
@@ -157,15 +164,18 @@ void BM_CounterSet0_Tabular(benchmark::State& state) {
});
}
BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16);
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"Bar\": %float,$", MR_Next},
- {"\"Baz\": %float,$", MR_Next},
- {"\"Foo\": %float$", MR_Next},
- {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
+ {"\"run_name\": \"BM_CounterSet0_Tabular/threads:%int\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bar\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float$", MR_Next},
+ {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet0_Tabular/threads:%int\",%csv_report,"
"%float,,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
@@ -189,15 +199,18 @@ void BM_CounterSet1_Tabular(benchmark::State& state) {
});
}
BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16);
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"Bar\": %float,$", MR_Next},
- {"\"Baz\": %float,$", MR_Next},
- {"\"Foo\": %float$", MR_Next},
- {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
+ {"\"run_name\": \"BM_CounterSet1_Tabular/threads:%int\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bar\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float$", MR_Next},
+ {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet1_Tabular/threads:%int\",%csv_report,"
"%float,,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
@@ -225,15 +238,18 @@ void BM_CounterSet2_Tabular(benchmark::State& state) {
});
}
BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16);
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"Bat\": %float,$", MR_Next},
- {"\"Baz\": %float,$", MR_Next},
- {"\"Foo\": %float$", MR_Next},
- {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
+ {"\"run_name\": \"BM_CounterSet2_Tabular/threads:%int\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bat\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float$", MR_Next},
+ {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet2_Tabular/threads:%int\",%csv_report,"
",%float,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
diff --git a/lib/libcxx/utils/google-benchmark/test/user_counters_test.cc b/lib/libcxx/utils/google-benchmark/test/user_counters_test.cc
index 7f7ccb9f77a..bb0d6b4c5a9 100644
--- a/lib/libcxx/utils/google-benchmark/test/user_counters_test.cc
+++ b/lib/libcxx/utils/google-benchmark/test/user_counters_test.cc
@@ -32,6 +32,8 @@ BENCHMARK(BM_Counters_Simple);
ADD_CASES(TC_ConsoleOut,
{{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
+ {"\"run_name\": \"BM_Counters_Simple\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -66,19 +68,22 @@ void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
state.SetItemsProcessed(150);
}
BENCHMARK(BM_Counters_WithBytesAndItemsPSec);
-ADD_CASES(TC_ConsoleOut,
- {{"^BM_Counters_WithBytesAndItemsPSec %console_report "
- "bar=%hrfloat foo=%hrfloat +%hrfloatB/s +%hrfloat items/s$"}});
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"bytes_per_second\": %float,$", MR_Next},
- {"\"items_per_second\": %float,$", MR_Next},
- {"\"bar\": %float,$", MR_Next},
- {"\"foo\": %float$", MR_Next},
- {"}", MR_Next}});
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_WithBytesAndItemsPSec %console_report "
+ "bar=%hrfloat bytes_per_second=%hrfloat/s "
+ "foo=%hrfloat items_per_second=%hrfloat/s$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
+ {"\"run_name\": \"BM_Counters_WithBytesAndItemsPSec\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bar\": %float,$", MR_Next},
+ {"\"bytes_per_second\": %float,$", MR_Next},
+ {"\"foo\": %float,$", MR_Next},
+ {"\"items_per_second\": %float$", MR_Next},
+ {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_WithBytesAndItemsPSec\","
"%csv_bytes_items_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
@@ -110,6 +115,8 @@ ADD_CASES(
TC_ConsoleOut,
{{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
+ {"\"run_name\": \"BM_Counters_Rate\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -141,14 +148,17 @@ void BM_Counters_Threads(benchmark::State& state) {
BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report "
"bar=%hrfloat foo=%hrfloat$"}});
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"bar\": %float,$", MR_Next},
- {"\"foo\": %float$", MR_Next},
- {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
+ {"\"run_name\": \"BM_Counters_Threads/threads:%int\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bar\": %float,$", MR_Next},
+ {"\"foo\": %float$", MR_Next},
+ {"}", MR_Next}});
ADD_CASES(
TC_CSVOut,
{{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}});
@@ -174,14 +184,17 @@ void BM_Counters_AvgThreads(benchmark::State& state) {
BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int "
"%console_report bar=%hrfloat foo=%hrfloat$"}});
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"bar\": %float,$", MR_Next},
- {"\"foo\": %float$", MR_Next},
- {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
+ {"\"run_name\": \"BM_Counters_AvgThreads/threads:%int\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bar\": %float,$", MR_Next},
+ {"\"foo\": %float$", MR_Next},
+ {"}", MR_Next}});
ADD_CASES(
TC_CSVOut,
{{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}});
@@ -210,6 +223,9 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
+ {"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$",
+ MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -242,14 +258,17 @@ void BM_Counters_IterationInvariant(benchmark::State& state) {
BENCHMARK(BM_Counters_IterationInvariant);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_IterationInvariant %console_report "
"bar=%hrfloat foo=%hrfloat$"}});
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_IterationInvariant\",$"},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"bar\": %float,$", MR_Next},
- {"\"foo\": %float$", MR_Next},
- {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_IterationInvariant\",$"},
+ {"\"run_name\": \"BM_Counters_IterationInvariant\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bar\": %float,$", MR_Next},
+ {"\"foo\": %float$", MR_Next},
+ {"}", MR_Next}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
@@ -281,6 +300,9 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kIsIterationInvariantRate "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"},
+ {"\"run_name\": \"BM_Counters_kIsIterationInvariantRate\",$",
+ MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -316,14 +338,17 @@ void BM_Counters_AvgIterations(benchmark::State& state) {
BENCHMARK(BM_Counters_AvgIterations);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgIterations %console_report "
"bar=%hrfloat foo=%hrfloat$"}});
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgIterations\",$"},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"bar\": %float,$", MR_Next},
- {"\"foo\": %float$", MR_Next},
- {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_AvgIterations\",$"},
+ {"\"run_name\": \"BM_Counters_AvgIterations\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bar\": %float,$", MR_Next},
+ {"\"foo\": %float$", MR_Next},
+ {"}", MR_Next}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_AvgIterations\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
@@ -351,14 +376,17 @@ void BM_Counters_kAvgIterationsRate(benchmark::State& state) {
BENCHMARK(BM_Counters_kAvgIterationsRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kAvgIterationsRate "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
-ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"},
- {"\"iterations\": %int,$", MR_Next},
- {"\"real_time\": %float,$", MR_Next},
- {"\"cpu_time\": %float,$", MR_Next},
- {"\"time_unit\": \"ns\",$", MR_Next},
- {"\"bar\": %float,$", MR_Next},
- {"\"foo\": %float$", MR_Next},
- {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"},
+ {"\"run_name\": \"BM_Counters_kAvgIterationsRate\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"bar\": %float,$", MR_Next},
+ {"\"foo\": %float$", MR_Next},
+ {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kAvgIterationsRate\",%csv_report,"
"%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
diff --git a/lib/libcxx/utils/google-benchmark/test/user_counters_thousands_test.cc b/lib/libcxx/utils/google-benchmark/test/user_counters_thousands_test.cc
new file mode 100644
index 00000000000..fa0ef972047
--- /dev/null
+++ b/lib/libcxx/utils/google-benchmark/test/user_counters_thousands_test.cc
@@ -0,0 +1,161 @@
+
+#undef NDEBUG
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+// ========================================================================= //
+// ------------------------ Thousands Customisation ------------------------ //
+// ========================================================================= //
+
+void BM_Counters_Thousands(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+ namespace bm = benchmark;
+ state.counters.insert({
+ {"t0_1000000DefaultBase",
+ bm::Counter(1000 * 1000, bm::Counter::kDefaults)},
+ {"t1_1000000Base1000", bm::Counter(1000 * 1000, bm::Counter::kDefaults,
+ benchmark::Counter::OneK::kIs1000)},
+ {"t2_1000000Base1024", bm::Counter(1000 * 1000, bm::Counter::kDefaults,
+ benchmark::Counter::OneK::kIs1024)},
+ {"t3_1048576Base1000", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
+ benchmark::Counter::OneK::kIs1000)},
+ {"t4_1048576Base1024", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
+ benchmark::Counter::OneK::kIs1024)},
+ });
+}
+BENCHMARK(BM_Counters_Thousands)->Repetitions(2);
+ADD_CASES(
+ TC_ConsoleOut,
+ {
+ {"^BM_Counters_Thousands/repeats:2 %console_report "
+ "t0_1000000DefaultBase=1000k "
+ "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
+ "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
+ {"^BM_Counters_Thousands/repeats:2 %console_report "
+ "t0_1000000DefaultBase=1000k "
+ "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
+ "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
+ {"^BM_Counters_Thousands/repeats:2_mean %console_report "
+ "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
+ "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
+ "t4_1048576Base1024=1024k$"},
+ {"^BM_Counters_Thousands/repeats:2_median %console_report "
+ "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
+ "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
+ "t4_1048576Base1024=1024k$"},
+ {"^BM_Counters_Thousands/repeats:2_stddev %console_time_only_report [ "
+ "]*2 t0_1000000DefaultBase=0 t1_1000000Base1000=0 "
+ "t2_1000000Base1024=0 t3_1048576Base1000=0 t4_1048576Base1024=0$"},
+ });
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
+ {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
+ {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Thousands/repeats:2_mean\",$"},
+ {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"iterations\": 2,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Thousands/repeats:2_median\",$"},
+ {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"iterations\": 2,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
+ {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_Counters_Thousands/repeats:2_stddev\",$"},
+ {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
+ {"\"run_type\": \"aggregate\",$", MR_Next},
+ {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"iterations\": 2,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"t0_1000000DefaultBase\": 0\\.(0)*e\\+(0)*,$", MR_Next},
+ {"\"t1_1000000Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next},
+ {"\"t2_1000000Base1024\": 0\\.(0)*e\\+(0)*,$", MR_Next},
+ {"\"t3_1048576Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next},
+ {"\"t4_1048576Base1024\": 0\\.(0)*e\\+(0)*$", MR_Next},
+ {"}", MR_Next}});
+
+ADD_CASES(
+ TC_CSVOut,
+ {{"^\"BM_Counters_Thousands/"
+ "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
+ "0)*6,1\\.04858e\\+(0)*6$"},
+ {"^\"BM_Counters_Thousands/"
+ "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
+ "0)*6,1\\.04858e\\+(0)*6$"},
+ {"^\"BM_Counters_Thousands/"
+ "repeats:2_mean\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
+ "04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
+ {"^\"BM_Counters_Thousands/"
+ "repeats:2_median\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
+ "04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
+ {"^\"BM_Counters_Thousands/repeats:2_stddev\",%csv_report,0,0,0,0,0$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckThousands(Results const& e) {
+ if (e.name != "BM_Counters_Thousands/repeats:2")
+ return; // Do not check the aggregates!
+
+ // check that the values are within 0.01% of the expected values
+ CHECK_FLOAT_COUNTER_VALUE(e, "t0_1000000DefaultBase", EQ, 1000 * 1000,
+ 0.0001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "t1_1000000Base1000", EQ, 1000 * 1000, 0.0001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "t2_1000000Base1024", EQ, 1000 * 1000, 0.0001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "t3_1048576Base1000", EQ, 1024 * 1024, 0.0001);
+ CHECK_FLOAT_COUNTER_VALUE(e, "t4_1048576Base1024", EQ, 1024 * 1024, 0.0001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_Thousands", &CheckThousands);
+
+// ========================================================================= //
+// --------------------------- TEST CASES END ------------------------------ //
+// ========================================================================= //
+
+int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
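The new test above exercises the OneK parameter of benchmark::Counter, which selects whether the console reporter scales counter values by 1000 or by 1024 when it appends the k/M suffixes. A minimal sketch of the same idea (the benchmark and counter names are illustrative):

    #include "benchmark/benchmark.h"

    // The same value reported under both scaling bases: with OneK::kIs1000
    // the console shows 1000000 as "1000k", while with OneK::kIs1024 it
    // shows roughly "976.562k".
    static void BM_CounterBases(benchmark::State& state) {
      for (auto _ : state) {
      }
      state.counters["base1000"] = benchmark::Counter(
          1000 * 1000, benchmark::Counter::kDefaults,
          benchmark::Counter::OneK::kIs1000);
      state.counters["base1024"] = benchmark::Counter(
          1000 * 1000, benchmark::Counter::kDefaults,
          benchmark::Counter::OneK::kIs1024);
    }
    BENCHMARK(BM_CounterBases);
    BENCHMARK_MAIN();
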
diff --git a/lib/libcxx/utils/google-benchmark/tools/compare.py b/lib/libcxx/utils/google-benchmark/tools/compare.py
index d27e24b3492..539ace6fb16 100755
--- a/lib/libcxx/utils/google-benchmark/tools/compare.py
+++ b/lib/libcxx/utils/google-benchmark/tools/compare.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python
+import unittest
"""
compare.py - versatile benchmark output compare tool
"""
@@ -36,6 +37,17 @@ def create_parser():
parser = ArgumentParser(
description='versatile benchmark output compare tool')
+ parser.add_argument(
+ '-a',
+ '--display_aggregates_only',
+ dest='display_aggregates_only',
+ action="store_true",
+        help="If there are repetitions, everything is displayed by default: "
+        "the actual runs and the computed aggregates. Sometimes it is "
+        "desirable to only view the aggregates, e.g. when there are many "
+        "repetitions. Note that only the display is affected; internally, "
+        "all the actual runs are still used, e.g. for the U test.")
+
utest = parser.add_argument_group()
utest.add_argument(
'--no-utest',
@@ -200,6 +212,9 @@ def main():
check_inputs(test_baseline, test_contender, benchmark_options)
+ if args.display_aggregates_only:
+ benchmark_options += ['--benchmark_display_aggregates_only=true']
+
options_baseline = []
options_contender = []
@@ -223,15 +238,13 @@ def main():
# Diff and output
output_lines = gbench.report.generate_difference_report(
- json1, json2, args.utest, args.utest_alpha)
+ json1, json2, args.display_aggregates_only,
+ args.utest, args.utest_alpha)
print(description)
for ln in output_lines:
print(ln)
-import unittest
-
-
class TestParser(unittest.TestCase):
def setUp(self):
self.parser = create_parser()
@@ -246,6 +259,7 @@ class TestParser(unittest.TestCase):
def test_benchmarks_basic(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1])
+ self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
@@ -255,6 +269,7 @@ class TestParser(unittest.TestCase):
def test_benchmarks_basic_without_utest(self):
parsed = self.parser.parse_args(
['--no-utest', 'benchmarks', self.testInput0, self.testInput1])
+ self.assertFalse(parsed.display_aggregates_only)
self.assertFalse(parsed.utest)
self.assertEqual(parsed.utest_alpha, 0.05)
self.assertEqual(parsed.mode, 'benchmarks')
@@ -262,9 +277,20 @@ class TestParser(unittest.TestCase):
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
+ def test_benchmarks_basic_display_aggregates_only(self):
+ parsed = self.parser.parse_args(
+ ['-a', 'benchmarks', self.testInput0, self.testInput1])
+ self.assertTrue(parsed.display_aggregates_only)
+ self.assertTrue(parsed.utest)
+ self.assertEqual(parsed.mode, 'benchmarks')
+ self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
+ self.assertEqual(parsed.test_contender[0].name, self.testInput1)
+ self.assertFalse(parsed.benchmark_options)
+
def test_benchmarks_basic_with_utest_alpha(self):
parsed = self.parser.parse_args(
['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
+ self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.utest_alpha, 0.314)
self.assertEqual(parsed.mode, 'benchmarks')
@@ -275,6 +301,7 @@ class TestParser(unittest.TestCase):
def test_benchmarks_basic_without_utest_with_utest_alpha(self):
parsed = self.parser.parse_args(
['--no-utest', '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
+ self.assertFalse(parsed.display_aggregates_only)
self.assertFalse(parsed.utest)
self.assertEqual(parsed.utest_alpha, 0.314)
self.assertEqual(parsed.mode, 'benchmarks')
@@ -285,6 +312,7 @@ class TestParser(unittest.TestCase):
def test_benchmarks_with_remainder(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1, 'd'])
+ self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
@@ -294,6 +322,7 @@ class TestParser(unittest.TestCase):
def test_benchmarks_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
+ self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
@@ -303,6 +332,7 @@ class TestParser(unittest.TestCase):
def test_filters_basic(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd'])
+ self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
@@ -313,6 +343,7 @@ class TestParser(unittest.TestCase):
def test_filters_with_remainder(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd', 'e'])
+ self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
@@ -323,6 +354,7 @@ class TestParser(unittest.TestCase):
def test_filters_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd', '--', 'f'])
+ self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
@@ -333,6 +365,7 @@ class TestParser(unittest.TestCase):
def test_benchmarksfiltered_basic(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
+ self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
@@ -344,6 +377,7 @@ class TestParser(unittest.TestCase):
def test_benchmarksfiltered_with_remainder(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f'])
+ self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
@@ -355,6 +389,7 @@ class TestParser(unittest.TestCase):
def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g'])
+ self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
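
The hunks above add a '-a' / '--display_aggregates_only' switch to tools/compare.py and forward it both to the benchmark binaries and to gbench.report.generate_difference_report. Below is a minimal standalone sketch (not the actual compare.py, whose parser carries many more options) of how such a switch can be declared and forwarded; only the flag names are taken from the hunks, the rest is illustrative.

    # Minimal sketch: declare the '-a' switch and forward it to the benchmarks.
    import argparse

    def create_parser():
        parser = argparse.ArgumentParser(description='benchmark comparison sketch')
        parser.add_argument(
            '-a', '--display_aggregates_only',
            dest='display_aggregates_only',
            action='store_true',
            help='display only the aggregates (mean/median/stddev), both when '
                 'running the benchmarks and in the difference report')
        return parser

    if __name__ == '__main__':
        args = create_parser().parse_args(['-a'])
        benchmark_options = []
        if args.display_aggregates_only:
            # Same forwarding as in the hunk above.
            benchmark_options += ['--benchmark_display_aggregates_only=true']
        print(benchmark_options)   # ['--benchmark_display_aggregates_only=true']
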
diff --git a/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run0.json b/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run0.json
index ca793f3367e..49f8b061437 100644
--- a/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run0.json
+++ b/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run0.json
@@ -9,6 +9,7 @@
"benchmarks": [
{
"name": "BM_One",
+ "run_type": "aggregate",
"iterations": 1000,
"real_time": 10,
"cpu_time": 100,
@@ -25,15 +26,40 @@
"name": "BM_Two",
"iterations": 1000,
"real_time": 8,
+ "cpu_time": 86,
+ "time_unit": "ns"
+ },
+ {
+ "name": "short",
+ "run_type": "aggregate",
+ "iterations": 1000,
+ "real_time": 8,
"cpu_time": 80,
"time_unit": "ns"
},
{
"name": "short",
+ "run_type": "aggregate",
+ "iterations": 1000,
+ "real_time": 8,
+ "cpu_time": 77,
+ "time_unit": "ns"
+ },
+ {
+ "name": "medium",
+ "run_type": "iteration",
"iterations": 1000,
"real_time": 8,
"cpu_time": 80,
"time_unit": "ns"
+ },
+ {
+ "name": "medium",
+ "run_type": "iteration",
+ "iterations": 1000,
+ "real_time": 9,
+ "cpu_time": 82,
+ "time_unit": "ns"
}
]
}
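
The fixture above gains a "run_type" field ("aggregate" vs. "iteration") on several entries. A toy illustration of how such a field can be used to keep only aggregate rows, mirroring the display_aggregates_only filtering added to report.py further below (the data values here are made up):

    # Keep only entries explicitly marked as aggregates.
    benchmarks = [
        {'name': 'short',  'run_type': 'aggregate', 'cpu_time': 80},
        {'name': 'medium', 'run_type': 'iteration', 'cpu_time': 80},
    ]
    aggregates_only = [b for b in benchmarks if b.get('run_type') == 'aggregate']
    print([b['name'] for b in aggregates_only])   # ['short']
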
diff --git a/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run1.json b/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run1.json
index e5cf50c7445..acc5ba17aed 100644
--- a/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run1.json
+++ b/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run1.json
@@ -16,6 +16,7 @@
},
{
"name": "BM_Two",
+ "run_type": "aggregate",
"iterations": 1000,
"real_time": 10,
"cpu_time": 89,
@@ -25,14 +26,39 @@
"name": "BM_Two",
"iterations": 1000,
"real_time": 7,
- "cpu_time": 70,
+ "cpu_time": 72,
"time_unit": "ns"
},
{
"name": "short",
+ "run_type": "aggregate",
"iterations": 1000,
- "real_time": 8,
- "cpu_time": 80,
+ "real_time": 7,
+ "cpu_time": 75,
+ "time_unit": "ns"
+ },
+ {
+ "name": "short",
+ "run_type": "aggregate",
+ "iterations": 762,
+ "real_time": 4.54,
+ "cpu_time": 66.6,
+ "time_unit": "ns"
+ },
+ {
+ "name": "short",
+ "run_type": "iteration",
+ "iterations": 1000,
+ "real_time": 800,
+ "cpu_time": 1,
+ "time_unit": "ns"
+ },
+ {
+ "name": "medium",
+ "run_type": "iteration",
+ "iterations": 1200,
+ "real_time": 5,
+ "cpu_time": 53,
"time_unit": "ns"
}
]
diff --git a/lib/libcxx/utils/google-benchmark/tools/gbench/report.py b/lib/libcxx/utils/google-benchmark/tools/gbench/report.py
index 4d03a547677..5085b931947 100644
--- a/lib/libcxx/utils/google-benchmark/tools/gbench/report.py
+++ b/lib/libcxx/utils/google-benchmark/tools/gbench/report.py
@@ -1,3 +1,4 @@
+import unittest
"""report.py - Utilities for reporting statistics about benchmark results
"""
import os
@@ -36,6 +37,7 @@ BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m')
UTEST_MIN_REPETITIONS = 2
UTEST_OPTIMAL_REPETITIONS = 9 # Lowest reasonable number; more is better.
+UTEST_COL_NAME = "_pvalue"
def color_format(use_color, fmt_str, *args, **kwargs):
@@ -93,9 +95,103 @@ def filter_benchmark(json_orig, family, replacement=""):
return filtered
+def get_unique_benchmark_names(json):
+ """
+ While *keeping* the order, give all the unique 'names' used for benchmarks.
+ """
+ seen = set()
+ uniqued = [x['name'] for x in json['benchmarks']
+ if x['name'] not in seen and
+ (seen.add(x['name']) or True)]
+ return uniqued
+
+
+def intersect(list1, list2):
+ """
+ Given two lists, get a new list consisting of the elements only contained
+ in *both of the input lists*, while preserving the ordering.
+ """
+ return [x for x in list1 if x in list2]
+
+
+def partition_benchmarks(json1, json2):
+ """
+ While preserving the ordering, find benchmarks with the same names in
+ both of the inputs, and group them.
+ (i.e. partition/filter into groups with common name)
+ """
+ json1_unique_names = get_unique_benchmark_names(json1)
+ json2_unique_names = get_unique_benchmark_names(json2)
+ names = intersect(json1_unique_names, json2_unique_names)
+ partitions = []
+ for name in names:
+ # Pick the time unit from the first entry of the lhs benchmark.
+ time_unit = (x['time_unit']
+ for x in json1['benchmarks'] if x['name'] == name).next()
+ # Filter by name and time unit.
+ lhs = [x for x in json1['benchmarks'] if x['name'] == name and
+ x['time_unit'] == time_unit]
+ rhs = [x for x in json2['benchmarks'] if x['name'] == name and
+ x['time_unit'] == time_unit]
+ partitions.append([lhs, rhs])
+ return partitions
+
+
+def extract_field(partition, field_name):
+ # The count of elements may be different. We want *all* of them.
+ lhs = [x[field_name] for x in partition[0]]
+ rhs = [x[field_name] for x in partition[1]]
+ return [lhs, rhs]
+
+
+def print_utest(partition, utest_alpha, first_col_width, use_color=True):
+ timings_time = extract_field(partition, 'real_time')
+ timings_cpu = extract_field(partition, 'cpu_time')
+
+ min_rep_cnt = min(len(timings_time[0]),
+ len(timings_time[1]),
+ len(timings_cpu[0]),
+ len(timings_cpu[1]))
+
+ # Does *everything* have at least UTEST_MIN_REPETITIONS repetitions?
+ if min_rep_cnt < UTEST_MIN_REPETITIONS:
+ return []
+
+ def get_utest_color(pval):
+ return BC_FAIL if pval >= utest_alpha else BC_OKGREEN
+
+ time_pvalue = mannwhitneyu(
+ timings_time[0], timings_time[1], alternative='two-sided').pvalue
+ cpu_pvalue = mannwhitneyu(
+ timings_cpu[0], timings_cpu[1], alternative='two-sided').pvalue
+
+ dsc = "U Test, Repetitions: {} vs {}".format(
+ len(timings_cpu[0]), len(timings_cpu[1]))
+ dsc_color = BC_OKGREEN
+
+ if min_rep_cnt < UTEST_OPTIMAL_REPETITIONS:
+ dsc_color = BC_WARNING
+ dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format(
+ UTEST_OPTIMAL_REPETITIONS)
+
+ special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}"
+
+ last_name = partition[0][0]['name']
+ return [color_format(use_color,
+ special_str,
+ BC_HEADER,
+ "{}{}".format(last_name, UTEST_COL_NAME),
+ first_col_width,
+ get_utest_color(time_pvalue), time_pvalue,
+ get_utest_color(cpu_pvalue), cpu_pvalue,
+ dsc_color, dsc,
+ endc=BC_ENDC)]
+
+
def generate_difference_report(
json1,
json2,
+ display_aggregates_only=False,
utest=False,
utest_alpha=0.05,
use_color=True):
@@ -112,108 +208,95 @@ def generate_difference_report(
return b
return None
- utest_col_name = "_pvalue"
first_col_width = max(
first_col_width,
len('Benchmark'))
- first_col_width += len(utest_col_name)
+ first_col_width += len(UTEST_COL_NAME)
first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format(
'Benchmark', 12 + first_col_width)
output_strs = [first_line, '-' * len(first_line)]
- last_name = None
- timings_time = [[], []]
- timings_cpu = [[], []]
-
- gen = (bn for bn in json1['benchmarks']
- if 'real_time' in bn and 'cpu_time' in bn)
- for bn in gen:
- fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
- special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}"
-
- if last_name is None:
- last_name = bn['name']
- if last_name != bn['name']:
- if ((len(timings_time[0]) >= UTEST_MIN_REPETITIONS) and
- (len(timings_time[1]) >= UTEST_MIN_REPETITIONS) and
- (len(timings_cpu[0]) >= UTEST_MIN_REPETITIONS) and
- (len(timings_cpu[1]) >= UTEST_MIN_REPETITIONS)):
- if utest:
- def get_utest_color(pval):
- if pval >= utest_alpha:
- return BC_FAIL
- else:
- return BC_OKGREEN
- time_pvalue = mannwhitneyu(
- timings_time[0], timings_time[1], alternative='two-sided').pvalue
- cpu_pvalue = mannwhitneyu(
- timings_cpu[0], timings_cpu[1], alternative='two-sided').pvalue
- dsc = "U Test, Repetitions: {}".format(len(timings_cpu[0]))
- dsc_color = BC_OKGREEN
- if len(timings_cpu[0]) < UTEST_OPTIMAL_REPETITIONS:
- dsc_color = BC_WARNING
- dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format(
- UTEST_OPTIMAL_REPETITIONS)
- output_strs += [color_format(use_color,
- special_str,
- BC_HEADER,
- "{}{}".format(last_name,
- utest_col_name),
- first_col_width,
- get_utest_color(time_pvalue),
- time_pvalue,
- get_utest_color(cpu_pvalue),
- cpu_pvalue,
- dsc_color,
- dsc,
- endc=BC_ENDC)]
- last_name = bn['name']
- timings_time = [[], []]
- timings_cpu = [[], []]
-
- other_bench = find_test(bn['name'])
- if not other_bench:
- continue
-
- if bn['time_unit'] != other_bench['time_unit']:
- continue
+ partitions = partition_benchmarks(json1, json2)
+ for partition in partitions:
+ # Careful, we may have different repetition counts.
+ for i in range(min(len(partition[0]), len(partition[1]))):
+ bn = partition[0][i]
+ other_bench = partition[1][i]
+
+ # If we were asked to display only aggregates,
+ # and this entry is not an aggregate, skip it.
+ if display_aggregates_only and 'run_type' in bn and 'run_type' in other_bench:
+ assert bn['run_type'] == other_bench['run_type']
+ if bn['run_type'] != 'aggregate':
+ continue
+
+ fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
+
+ def get_color(res):
+ if res > 0.05:
+ return BC_FAIL
+ elif res > -0.07:
+ return BC_WHITE
+ else:
+ return BC_CYAN
+
+ tres = calculate_change(bn['real_time'], other_bench['real_time'])
+ cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
+ output_strs += [color_format(use_color,
+ fmt_str,
+ BC_HEADER,
+ bn['name'],
+ first_col_width,
+ get_color(tres),
+ tres,
+ get_color(cpures),
+ cpures,
+ bn['real_time'],
+ other_bench['real_time'],
+ bn['cpu_time'],
+ other_bench['cpu_time'],
+ endc=BC_ENDC)]
+
+ # After processing the whole partition, if requested, do the U test.
+ if utest:
+ output_strs += print_utest(partition,
+ utest_alpha=utest_alpha,
+ first_col_width=first_col_width,
+ use_color=use_color)
- def get_color(res):
- if res > 0.05:
- return BC_FAIL
- elif res > -0.07:
- return BC_WHITE
- else:
- return BC_CYAN
-
- timings_time[0].append(bn['real_time'])
- timings_time[1].append(other_bench['real_time'])
- timings_cpu[0].append(bn['cpu_time'])
- timings_cpu[1].append(other_bench['cpu_time'])
-
- tres = calculate_change(timings_time[0][-1], timings_time[1][-1])
- cpures = calculate_change(timings_cpu[0][-1], timings_cpu[1][-1])
- output_strs += [color_format(use_color,
- fmt_str,
- BC_HEADER,
- bn['name'],
- first_col_width,
- get_color(tres),
- tres,
- get_color(cpures),
- cpures,
- timings_time[0][-1],
- timings_time[1][-1],
- timings_cpu[0][-1],
- timings_cpu[1][-1],
- endc=BC_ENDC)]
return output_strs
+
###############################################################################
# Unit tests
-import unittest
+class TestGetUniqueBenchmarkNames(unittest.TestCase):
+ def load_results(self):
+ import json
+ testInputs = os.path.join(
+ os.path.dirname(
+ os.path.realpath(__file__)),
+ 'Inputs')
+ testOutput = os.path.join(testInputs, 'test3_run0.json')
+ with open(testOutput, 'r') as f:
+ json = json.load(f)
+ return json
+
+ def test_basic(self):
+ expect_lines = [
+ 'BM_One',
+ 'BM_Two',
+ 'short', # These two are not sorted
+ 'medium', # These two are not sorted
+ ]
+ json = self.load_results()
+ output_lines = get_unique_benchmark_names(json)
+ print("\n")
+ print("\n".join(output_lines))
+ self.assertEqual(len(output_lines), len(expect_lines))
+ for i in range(0, len(output_lines)):
+ self.assertEqual(expect_lines[i], output_lines[i])
class TestReportDifference(unittest.TestCase):
@@ -259,7 +342,7 @@ class TestReportDifference(unittest.TestCase):
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(len(parts), 7)
- self.assertEqual(parts, expect_lines[i])
+ self.assertEqual(expect_lines[i], parts)
class TestReportDifferenceBetweenFamilies(unittest.TestCase):
@@ -293,7 +376,7 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase):
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(len(parts), 7)
- self.assertEqual(parts, expect_lines[i])
+ self.assertEqual(expect_lines[i], parts)
class TestReportDifferenceWithUTest(unittest.TestCase):
@@ -316,13 +399,15 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
expect_lines = [
['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
- ['BM_Two', '+0.2500', '+0.1125', '8', '10', '80', '89'],
+ ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
['BM_Two_pvalue',
- '0.2207',
- '0.6831',
+ '0.6985',
+ '0.6985',
'U',
'Test,',
'Repetitions:',
+ '2',
+ 'vs',
'2.',
'WARNING:',
'Results',
@@ -330,18 +415,103 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
'9+',
'repetitions',
'recommended.'],
- ['short', '+0.0000', '+0.0000', '8', '8', '80', '80'],
+ ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
+ ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
+ ['short_pvalue',
+ '0.7671',
+ '0.1489',
+ 'U',
+ 'Test,',
+ 'Repetitions:',
+ '2',
+ 'vs',
+ '3.',
+ 'WARNING:',
+ 'Results',
+ 'unreliable!',
+ '9+',
+ 'repetitions',
+ 'recommended.'],
+ ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'],
+ ]
+ json1, json2 = self.load_results()
+ output_lines_with_header = generate_difference_report(
+ json1, json2, utest=True, utest_alpha=0.05, use_color=False)
+ output_lines = output_lines_with_header[2:]
+ print("\n")
+ print("\n".join(output_lines_with_header))
+ self.assertEqual(len(output_lines), len(expect_lines))
+ for i in range(0, len(output_lines)):
+ parts = [x for x in output_lines[i].split(' ') if x]
+ self.assertEqual(expect_lines[i], parts)
+
+
+class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
+ unittest.TestCase):
+ def load_results(self):
+ import json
+ testInputs = os.path.join(
+ os.path.dirname(
+ os.path.realpath(__file__)),
+ 'Inputs')
+ testOutput1 = os.path.join(testInputs, 'test3_run0.json')
+ testOutput2 = os.path.join(testInputs, 'test3_run1.json')
+ with open(testOutput1, 'r') as f:
+ json1 = json.load(f)
+ with open(testOutput2, 'r') as f:
+ json2 = json.load(f)
+ return json1, json2
+
+ def test_utest(self):
+ expect_lines = []
+ expect_lines = [
+ ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
+ ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
+ ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
+ ['BM_Two_pvalue',
+ '0.6985',
+ '0.6985',
+ 'U',
+ 'Test,',
+ 'Repetitions:',
+ '2',
+ 'vs',
+ '2.',
+ 'WARNING:',
+ 'Results',
+ 'unreliable!',
+ '9+',
+ 'repetitions',
+ 'recommended.'],
+ ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
+ ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
+ ['short_pvalue',
+ '0.7671',
+ '0.1489',
+ 'U',
+ 'Test,',
+ 'Repetitions:',
+ '2',
+ 'vs',
+ '3.',
+ 'WARNING:',
+ 'Results',
+ 'unreliable!',
+ '9+',
+ 'repetitions',
+ 'recommended.'],
]
json1, json2 = self.load_results()
output_lines_with_header = generate_difference_report(
- json1, json2, True, 0.05, use_color=False)
+ json1, json2, display_aggregates_only=True,
+ utest=True, utest_alpha=0.05, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
- self.assertEqual(parts, expect_lines[i])
+ self.assertEqual(expect_lines[i], parts)
if __name__ == '__main__':
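
The rewritten report.py above replaces the running last_name/timings state machine with explicit helpers: get_unique_benchmark_names, intersect and partition_benchmarks group same-named repetitions from both runs before diffing and running the U test. A simplified standalone restatement of that grouping idea on toy data (the helper names and data below are illustrative, not imports from gbench):

    # Keep first-seen order of names, then pair up same-named entries from two runs.
    def unique_names(benchmarks):
        seen = set()
        return [b['name'] for b in benchmarks
                if b['name'] not in seen and (seen.add(b['name']) or True)]

    def partition(run1, run2):
        common = set(unique_names(run2))
        names = [n for n in unique_names(run1) if n in common]
        return [([b for b in run1 if b['name'] == n],
                 [b for b in run2 if b['name'] == n]) for n in names]

    run1 = [{'name': 'short', 'real_time': 8}, {'name': 'short', 'real_time': 8},
            {'name': 'medium', 'real_time': 8}]
    run2 = [{'name': 'short', 'real_time': 7}, {'name': 'medium', 'real_time': 5}]
    for lhs, rhs in partition(run1, run2):
        # Repetition counts may differ between runs, as the comment in the
        # diff notes; only min(len(lhs), len(rhs)) rows get diffed.
        print(lhs[0]['name'], len(lhs), 'vs', len(rhs))
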
diff --git a/lib/libcxx/utils/google-benchmark/tools/gbench/util.py b/lib/libcxx/utils/google-benchmark/tools/gbench/util.py
index 07c23772754..1f8e8e2c479 100644
--- a/lib/libcxx/utils/google-benchmark/tools/gbench/util.py
+++ b/lib/libcxx/utils/google-benchmark/tools/gbench/util.py
@@ -7,11 +7,13 @@ import subprocess
import sys
# Input file type enumeration
-IT_Invalid = 0
-IT_JSON = 1
+IT_Invalid = 0
+IT_JSON = 1
IT_Executable = 2
_num_magic_bytes = 2 if sys.platform.startswith('win') else 4
+
+
def is_executable_file(filename):
"""
Return 'True' if 'filename' names a valid file which is likely
@@ -46,7 +48,7 @@ def is_json_file(filename):
with open(filename, 'r') as f:
json.load(f)
return True
- except:
+ except BaseException:
pass
return False
@@ -84,6 +86,7 @@ def check_input_file(filename):
sys.exit(1)
return ftype
+
def find_benchmark_flag(prefix, benchmark_flags):
"""
Search the specified list of flags for a flag matching `<prefix><arg>` and
@@ -97,6 +100,7 @@ def find_benchmark_flag(prefix, benchmark_flags):
result = f[len(prefix):]
return result
+
def remove_benchmark_flags(prefix, benchmark_flags):
"""
Return a new list containing the specified benchmark_flags except those
@@ -105,6 +109,7 @@ def remove_benchmark_flags(prefix, benchmark_flags):
assert prefix.startswith('--') and prefix.endswith('=')
return [f for f in benchmark_flags if not f.startswith(prefix)]
+
def load_benchmark_results(fname):
"""
Read benchmark output from a file and return the JSON object.
@@ -129,7 +134,7 @@ def run_benchmark(exe_name, benchmark_flags):
thandle, output_name = tempfile.mkstemp()
os.close(thandle)
benchmark_flags = list(benchmark_flags) + \
- ['--benchmark_out=%s' % output_name]
+ ['--benchmark_out=%s' % output_name]
cmd = [exe_name] + benchmark_flags
print("RUNNING: %s" % ' '.join(cmd))
@@ -156,4 +161,4 @@ def run_or_load_benchmark(filename, benchmark_flags):
elif ftype == IT_Executable:
return run_benchmark(filename, benchmark_flags)
else:
- assert False # This branch is unreachable \ No newline at end of file
+ assert False # This branch is unreachable
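
The util.py hunks above are mostly mechanical (PEP 8 spacing, 'except BaseException' instead of a bare except, indentation of the '--benchmark_out' continuation, a trailing newline). For context, here is a small standalone sketch of the flag-prefix contract described in the find_benchmark_flag / remove_benchmark_flags docstrings visible in the hunk context; it is re-implemented for illustration rather than imported from gbench.util, and follows the loop shown above in letting the last occurrence of a flag win.

    def find_benchmark_flag(prefix, benchmark_flags):
        assert prefix.startswith('--') and prefix.endswith('=')
        result = None
        for f in benchmark_flags:
            if f.startswith(prefix):
                result = f[len(prefix):]   # last occurrence wins
        return result

    def remove_benchmark_flags(prefix, benchmark_flags):
        assert prefix.startswith('--') and prefix.endswith('=')
        return [f for f in benchmark_flags if not f.startswith(prefix)]

    flags = ['--benchmark_filter=BM_One', '--benchmark_out=a.json',
             '--benchmark_out=b.json']
    print(find_benchmark_flag('--benchmark_out=', flags))      # 'b.json'
    print(remove_benchmark_flags('--benchmark_out=', flags))   # filter flag only
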
diff --git a/lib/libcxx/utils/libcxx/test/format.py b/lib/libcxx/utils/libcxx/test/format.py
index 74e3cc0aa30..6a334ac31cf 100644
--- a/lib/libcxx/utils/libcxx/test/format.py
+++ b/lib/libcxx/utils/libcxx/test/format.py
@@ -188,7 +188,7 @@ class LibcxxTestFormat(object):
if rc != 0:
report = libcxx.util.makeReport(cmd, out, err, rc)
report += "Compilation failed unexpectedly!"
- return lit.Test.FAIL, report
+ return lit.Test.Result(lit.Test.FAIL, report)
# Run the test
local_cwd = os.path.dirname(source_path)
env = None
@@ -206,14 +206,14 @@ class LibcxxTestFormat(object):
cmd, out, err, rc = self.executor.run(exec_path, [exec_path],
local_cwd, data_files,
env)
+ report = "Compiled With: '%s'\n" % ' '.join(compile_cmd)
+ report += libcxx.util.makeReport(cmd, out, err, rc)
if rc == 0:
res = lit.Test.PASS if retry_count == 0 else lit.Test.FLAKYPASS
- return res, ''
+ return lit.Test.Result(res, report)
elif rc != 0 and retry_count + 1 == max_retry:
- report = libcxx.util.makeReport(cmd, out, err, rc)
- report = "Compiled With: %s\n%s" % (compile_cmd, report)
report += "Compiled test failed unexpectedly!"
- return lit.Test.FAIL, report
+ return lit.Test.Result(lit.Test.FAIL, report)
assert False # Unreachable
finally:
@@ -250,16 +250,15 @@ class LibcxxTestFormat(object):
#
# Therefore, we check if the test was expected to fail because of
# nodiscard before enabling it
- test_str = "ignoring return value of function declared with " \
- + "'nodiscard' attribute"
- if test_str in contents:
+ test_str_list = ['ignoring return value', 'nodiscard', 'NODISCARD']
+ if any(test_str in contents for test_str in test_str_list):
test_cxx.flags += ['-Werror=unused-result']
cmd, out, err, rc = test_cxx.compile(source_path, out=os.devnull)
expected_rc = 0 if use_verify else 1
+ report = libcxx.util.makeReport(cmd, out, err, rc)
if rc == expected_rc:
- return lit.Test.PASS, ''
+ return lit.Test.Result(lit.Test.PASS, report)
else:
- report = libcxx.util.makeReport(cmd, out, err, rc)
- report_msg = ('Expected compilation to fail!' if not use_verify else
- 'Expected compilation using verify to pass!')
- return lit.Test.FAIL, report + report_msg + '\n'
+ report += ('Expected compilation to fail!\n' if not use_verify else
+ 'Expected compilation using verify to pass!\n')
+ return lit.Test.Result(lit.Test.FAIL, report)
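
The format.py hunks switch the compile and run paths to return lit.Test.Result objects carrying the full report, and relax the .fail.cpp nodiscard detection from one exact sentence to a list of markers. A tiny runnable sketch of the relaxed detection; the marker list is taken from the hunk, while the helper name is made up:

    # Any of these markers in a .fail.cpp opts the test into -Werror=unused-result.
    test_str_list = ['ignoring return value', 'nodiscard', 'NODISCARD']

    def wants_unused_result_error(contents):
        return any(test_str in contents for test_str in test_str_list)

    print(wants_unused_result_error('// error: nodiscard attribute'))   # True
    print(wants_unused_result_error('int f() { return 0; }'))           # False
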
diff --git a/lib/libcxx/utils/libcxx/test/googlebenchmark.py b/lib/libcxx/utils/libcxx/test/googlebenchmark.py
new file mode 100644
index 00000000000..6fe731e8c91
--- /dev/null
+++ b/lib/libcxx/utils/libcxx/test/googlebenchmark.py
@@ -0,0 +1,122 @@
+from __future__ import absolute_import
+import os
+import subprocess
+import sys
+
+import lit.Test
+import lit.TestRunner
+import lit.util
+from lit.formats.base import TestFormat
+
+kIsWindows = sys.platform in ['win32', 'cygwin']
+
+class GoogleBenchmark(TestFormat):
+ def __init__(self, test_sub_dirs, test_suffix, benchmark_args=[]):
+ self.benchmark_args = list(benchmark_args)
+ self.test_sub_dirs = os.path.normcase(str(test_sub_dirs)).split(';')
+
+ # On Windows, assume tests will also end in '.exe'.
+ exe_suffix = str(test_suffix)
+ if kIsWindows:
+ exe_suffix += '.exe'
+
+ # Also check for .py files for testing purposes.
+ self.test_suffixes = {exe_suffix, test_suffix + '.py'}
+
+ def getBenchmarkTests(self, path, litConfig, localConfig):
+ """getBenchmarkTests(path) - [name]
+
+ Return the tests available in the benchmark executable.
+
+ Args:
+ path: String path to a benchmark executable
+ litConfig: LitConfig instance
+ localConfig: TestingConfig instance"""
+
+ # TODO: allow splitting tests according to the "benchmark family" so
+ # the output for a single family of tests all belongs to the same test
+ # target.
+ list_test_cmd = [path, '--benchmark_list_tests']
+ try:
+ output = subprocess.check_output(list_test_cmd,
+ env=localConfig.environment)
+ except subprocess.CalledProcessError as exc:
+ litConfig.warning(
+ "unable to discover google-benchmarks in %r: %s. Process output: %s"
+ % (path, sys.exc_info()[1], exc.output))
+ raise StopIteration
+
+ nested_tests = []
+ for ln in output.splitlines(False): # Don't keep newlines.
+ ln = lit.util.to_string(ln)
+ if not ln.strip():
+ continue
+
+ index = 0
+ while ln[index*2:index*2+2] == ' ':
+ index += 1
+ while len(nested_tests) > index:
+ nested_tests.pop()
+
+ ln = ln[index*2:]
+ if ln.endswith('.'):
+ nested_tests.append(ln)
+ elif any([name.startswith('DISABLED_')
+ for name in nested_tests + [ln]]):
+ # Gtest will internally skip these tests. No need to launch a
+ # child process for them.
+ continue
+ else:
+ yield ''.join(nested_tests) + ln
+
+ def getTestsInDirectory(self, testSuite, path_in_suite,
+ litConfig, localConfig):
+ source_path = testSuite.getSourcePath(path_in_suite)
+ for subdir in self.test_sub_dirs:
+ dir_path = os.path.join(source_path, subdir)
+ if not os.path.isdir(dir_path):
+ continue
+ for fn in lit.util.listdir_files(dir_path,
+ suffixes=self.test_suffixes):
+ # Discover the tests in this executable.
+ execpath = os.path.join(source_path, subdir, fn)
+ testnames = self.getBenchmarkTests(execpath, litConfig, localConfig)
+ for testname in testnames:
+ testPath = path_in_suite + (subdir, fn, testname)
+ yield lit.Test.Test(testSuite, testPath, localConfig,
+ file_path=execpath)
+
+ def execute(self, test, litConfig):
+ testPath, testName = os.path.split(test.getSourcePath())
+ while not os.path.exists(testPath):
+ # Handle GTest parametrized and typed tests, whose name includes
+ # some '/'s.
+ testPath, namePrefix = os.path.split(testPath)
+ testName = namePrefix + '/' + testName
+
+ cmd = [testPath, '--benchmark_filter=%s$' % testName ] + self.benchmark_args
+
+ if litConfig.noExecute:
+ return lit.Test.PASS, ''
+
+ try:
+ out, err, exitCode = lit.util.executeCommand(
+ cmd, env=test.config.environment,
+ timeout=litConfig.maxIndividualTestTime)
+ except lit.util.ExecuteCommandTimeoutException:
+ return (lit.Test.TIMEOUT,
+ 'Reached timeout of {} seconds'.format(
+ litConfig.maxIndividualTestTime)
+ )
+
+ if exitCode:
+ return lit.Test.FAIL, out + err
+
+ passing_test_line = testName
+ if passing_test_line not in out:
+ msg = ('Unable to find %r in google benchmark output:\n\n%s%s' %
+ (passing_test_line, out, err))
+ return lit.Test.UNRESOLVED, msg
+
+ return lit.Test.PASS, err + out
+
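
The new googlebenchmark.py lit format discovers benchmark names via '--benchmark_list_tests' and runs each one with '--benchmark_filter=<name>$'. Below is a standalone sketch of the name-flattening loop used in getBenchmarkTests above. The indentation handling is inherited from the googletest lit format; real '--benchmark_list_tests' output is normally one flat name per line, so the nested listing here is purely illustrative.

    # Indented lines are treated as nested under the preceding 'Suite.' lines.
    listing = """BM_Sort.
      integers
      strings
    BM_Copy/8
    """

    nested_tests = []
    for ln in listing.splitlines():
        if not ln.strip():
            continue
        ln = ln[4:] if ln.startswith('    ') else ln   # strip the example indent
        index = 0
        while ln[index * 2:index * 2 + 2] == '  ':     # two spaces per nesting level
            index += 1
        while len(nested_tests) > index:
            nested_tests.pop()
        ln = ln[index * 2:]
        if ln.endswith('.'):
            nested_tests.append(ln)
        else:
            print(''.join(nested_tests) + ln)
    # Prints: BM_Sort.integers, BM_Sort.strings, BM_Copy/8 (one per line)
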
diff --git a/lib/libcxx/utils/libcxx/test/target_info.py b/lib/libcxx/utils/libcxx/test/target_info.py
index de2232ff418..32bbb2e1150 100644
--- a/lib/libcxx/utils/libcxx/test/target_info.py
+++ b/lib/libcxx/utils/libcxx/test/target_info.py
@@ -15,6 +15,8 @@ import re
import subprocess
import sys
+from libcxx.util import executeCommand
+
class DefaultTargetInfo(object):
def __init__(self, full_config):
self.full_config = full_config
@@ -127,14 +129,13 @@ class DarwinLocalTI(DefaultTargetInfo):
cmd = ['xcrun', '--sdk', name, '--show-sdk-path']
else:
cmd = ['xcrun', '--show-sdk-path']
- try:
- out = subprocess.check_output(cmd).strip()
- res = 0
- except OSError:
- res = -1
- if res == 0 and out:
- sdk_path = out
+ out, err, exit_code = executeCommand(cmd)
+ if exit_code != 0:
+ self.full_config.lit_config.warning("Could not determine macOS SDK path! stderr was " + err)
+ if exit_code == 0 and out:
+ sdk_path = out.strip()
self.full_config.lit_config.note('using SDKROOT: %r' % sdk_path)
+ assert isinstance(sdk_path, str)
flags += ["-isysroot", sdk_path]
def add_cxx_link_flags(self, flags):
@@ -143,12 +144,12 @@ class DarwinLocalTI(DefaultTargetInfo):
def configure_env(self, env):
library_paths = []
# Configure the library path for libc++
- if self.full_config.use_system_cxx_lib:
+ if self.full_config.cxx_runtime_root:
+ library_paths += [self.full_config.cxx_runtime_root]
+ elif self.full_config.use_system_cxx_lib:
if (os.path.isdir(str(self.full_config.use_system_cxx_lib))):
library_paths += [self.full_config.use_system_cxx_lib]
- pass
- elif self.full_config.cxx_runtime_root:
- library_paths += [self.full_config.cxx_runtime_root]
+
# Configure the abi library path
if self.full_config.abi_library_root:
library_paths += [self.full_config.abi_library_root]
@@ -181,6 +182,18 @@ class FreeBSDLocalTI(DefaultTargetInfo):
flags += ['-lc', '-lm', '-lpthread', '-lgcc_s', '-lcxxrt']
+class NetBSDLocalTI(DefaultTargetInfo):
+ def __init__(self, full_config):
+ super(NetBSDLocalTI, self).__init__(full_config)
+
+ def add_locale_features(self, features):
+ add_common_locales(features, self.full_config.lit_config)
+
+ def add_cxx_link_flags(self, flags):
+ flags += ['-lc', '-lm', '-lpthread', '-lgcc_s', '-lc++abi',
+ '-lunwind']
+
+
class LinuxLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(LinuxLocalTI, self).__init__(full_config)
@@ -279,6 +292,7 @@ def make_target_info(full_config):
target_system = platform.system()
if target_system == 'Darwin': return DarwinLocalTI(full_config)
if target_system == 'FreeBSD': return FreeBSDLocalTI(full_config)
+ if target_system == 'NetBSD': return NetBSDLocalTI(full_config)
if target_system == 'Linux': return LinuxLocalTI(full_config)
if target_system == 'Windows': return WindowsLocalTI(full_config)
return DefaultTargetInfo(full_config)
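
The target_info.py hunks replace the check_output/OSError handling for the macOS SDK lookup with libcxx.util.executeCommand (which returns out, err and the exit code), warn on failure, prefer cxx_runtime_root over use_system_cxx_lib when building the library path, and add a NetBSD target. A hedged sketch of the same SDK query using plain subprocess, since executeCommand is internal to the libc++ test suite; it is only meaningful on macOS with the Xcode command line tools installed:

    import subprocess

    def query_sdk_path(sdk_name=None):
        # Mirrors the xcrun invocation shown in the hunk above.
        cmd = ['xcrun', '--show-sdk-path']
        if sdk_name:
            cmd = ['xcrun', '--sdk', sdk_name, '--show-sdk-path']
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, universal_newlines=True)
        out, err = proc.communicate()
        if proc.returncode != 0:
            # Like the new warning path: report stderr instead of silently
            # swallowing the failure as the old OSError-only handling did.
            print('warning: could not determine SDK path: %s' % err.strip())
            return None
        return out.strip()

    if __name__ == '__main__':
        print(query_sdk_path())   # e.g. /Applications/Xcode.app/.../MacOSX.sdk
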