Diffstat (limited to 'lib/libcxx/utils/google-benchmark/tools/gbench')
-rw-r--r--  lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run1.json  | 102
-rw-r--r--  lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run2.json  | 102
-rw-r--r--  lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test2_run.json   |  81
-rw-r--r--  lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run0.json  |  65
-rw-r--r--  lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run1.json  |  65
-rw-r--r--  lib/libcxx/utils/google-benchmark/tools/gbench/__init__.py             |   8
-rw-r--r--  lib/libcxx/utils/google-benchmark/tools/gbench/report.py               | 522
-rw-r--r--  lib/libcxx/utils/google-benchmark/tools/gbench/util.py                 | 164
8 files changed, 0 insertions(+), 1109 deletions(-)
diff --git a/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run1.json b/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run1.json
deleted file mode 100644
index d7ec6a9c8f6..00000000000
--- a/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run1.json
+++ /dev/null
@@ -1,102 +0,0 @@
-{
- "context": {
- "date": "2016-08-02 17:44:46",
- "num_cpus": 4,
- "mhz_per_cpu": 4228,
- "cpu_scaling_enabled": false,
- "library_build_type": "release"
- },
- "benchmarks": [
- {
- "name": "BM_SameTimes",
- "iterations": 1000,
- "real_time": 10,
- "cpu_time": 10,
- "time_unit": "ns"
- },
- {
- "name": "BM_2xFaster",
- "iterations": 1000,
- "real_time": 50,
- "cpu_time": 50,
- "time_unit": "ns"
- },
- {
- "name": "BM_2xSlower",
- "iterations": 1000,
- "real_time": 50,
- "cpu_time": 50,
- "time_unit": "ns"
- },
- {
- "name": "BM_1PercentFaster",
- "iterations": 1000,
- "real_time": 100,
- "cpu_time": 100,
- "time_unit": "ns"
- },
- {
- "name": "BM_1PercentSlower",
- "iterations": 1000,
- "real_time": 100,
- "cpu_time": 100,
- "time_unit": "ns"
- },
- {
- "name": "BM_10PercentFaster",
- "iterations": 1000,
- "real_time": 100,
- "cpu_time": 100,
- "time_unit": "ns"
- },
- {
- "name": "BM_10PercentSlower",
- "iterations": 1000,
- "real_time": 100,
- "cpu_time": 100,
- "time_unit": "ns"
- },
- {
- "name": "BM_100xSlower",
- "iterations": 1000,
- "real_time": 100,
- "cpu_time": 100,
- "time_unit": "ns"
- },
- {
- "name": "BM_100xFaster",
- "iterations": 1000,
- "real_time": 10000,
- "cpu_time": 10000,
- "time_unit": "ns"
- },
- {
- "name": "BM_10PercentCPUToTime",
- "iterations": 1000,
- "real_time": 100,
- "cpu_time": 100,
- "time_unit": "ns"
- },
- {
- "name": "BM_ThirdFaster",
- "iterations": 1000,
- "real_time": 100,
- "cpu_time": 100,
- "time_unit": "ns"
- },
- {
- "name": "BM_BadTimeUnit",
- "iterations": 1000,
- "real_time": 0.4,
- "cpu_time": 0.5,
- "time_unit": "s"
- },
- {
- "name": "BM_DifferentTimeUnit",
- "iterations": 1,
- "real_time": 1,
- "cpu_time": 1,
- "time_unit": "s"
- }
- ]
-}
diff --git a/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run2.json b/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run2.json
deleted file mode 100644
index 59a5ffaca4d..00000000000
--- a/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test1_run2.json
+++ /dev/null
@@ -1,102 +0,0 @@
-{
- "context": {
- "date": "2016-08-02 17:44:46",
- "num_cpus": 4,
- "mhz_per_cpu": 4228,
- "cpu_scaling_enabled": false,
- "library_build_type": "release"
- },
- "benchmarks": [
- {
- "name": "BM_SameTimes",
- "iterations": 1000,
- "real_time": 10,
- "cpu_time": 10,
- "time_unit": "ns"
- },
- {
- "name": "BM_2xFaster",
- "iterations": 1000,
- "real_time": 25,
- "cpu_time": 25,
- "time_unit": "ns"
- },
- {
- "name": "BM_2xSlower",
- "iterations": 20833333,
- "real_time": 100,
- "cpu_time": 100,
- "time_unit": "ns"
- },
- {
- "name": "BM_1PercentFaster",
- "iterations": 1000,
- "real_time": 98.9999999,
- "cpu_time": 98.9999999,
- "time_unit": "ns"
- },
- {
- "name": "BM_1PercentSlower",
- "iterations": 1000,
- "real_time": 100.9999999,
- "cpu_time": 100.9999999,
- "time_unit": "ns"
- },
- {
- "name": "BM_10PercentFaster",
- "iterations": 1000,
- "real_time": 90,
- "cpu_time": 90,
- "time_unit": "ns"
- },
- {
- "name": "BM_10PercentSlower",
- "iterations": 1000,
- "real_time": 110,
- "cpu_time": 110,
- "time_unit": "ns"
- },
- {
- "name": "BM_100xSlower",
- "iterations": 1000,
- "real_time": 1.0000e+04,
- "cpu_time": 1.0000e+04,
- "time_unit": "ns"
- },
- {
- "name": "BM_100xFaster",
- "iterations": 1000,
- "real_time": 100,
- "cpu_time": 100,
- "time_unit": "ns"
- },
- {
- "name": "BM_10PercentCPUToTime",
- "iterations": 1000,
- "real_time": 110,
- "cpu_time": 90,
- "time_unit": "ns"
- },
- {
- "name": "BM_ThirdFaster",
- "iterations": 1000,
- "real_time": 66.665,
- "cpu_time": 66.664,
- "time_unit": "ns"
- },
- {
- "name": "BM_BadTimeUnit",
- "iterations": 1000,
- "real_time": 0.04,
- "cpu_time": 0.6,
- "time_unit": "s"
- },
- {
- "name": "BM_DifferentTimeUnit",
- "iterations": 1,
- "real_time": 1,
- "cpu_time": 1,
- "time_unit": "ns"
- }
- ]
-}
diff --git a/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test2_run.json b/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test2_run.json
deleted file mode 100644
index 15bc6980304..00000000000
--- a/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test2_run.json
+++ /dev/null
@@ -1,81 +0,0 @@
-{
- "context": {
- "date": "2016-08-02 17:44:46",
- "num_cpus": 4,
- "mhz_per_cpu": 4228,
- "cpu_scaling_enabled": false,
- "library_build_type": "release"
- },
- "benchmarks": [
- {
- "name": "BM_Hi",
- "iterations": 1234,
- "real_time": 42,
- "cpu_time": 24,
- "time_unit": "ms"
- },
- {
- "name": "BM_Zero",
- "iterations": 1000,
- "real_time": 10,
- "cpu_time": 10,
- "time_unit": "ns"
- },
- {
- "name": "BM_Zero/4",
- "iterations": 4000,
- "real_time": 40,
- "cpu_time": 40,
- "time_unit": "ns"
- },
- {
- "name": "Prefix/BM_Zero",
- "iterations": 2000,
- "real_time": 20,
- "cpu_time": 20,
- "time_unit": "ns"
- },
- {
- "name": "Prefix/BM_Zero/3",
- "iterations": 3000,
- "real_time": 30,
- "cpu_time": 30,
- "time_unit": "ns"
- },
- {
- "name": "BM_One",
- "iterations": 5000,
- "real_time": 5,
- "cpu_time": 5,
- "time_unit": "ns"
- },
- {
- "name": "BM_One/4",
- "iterations": 2000,
- "real_time": 20,
- "cpu_time": 20,
- "time_unit": "ns"
- },
- {
- "name": "Prefix/BM_One",
- "iterations": 1000,
- "real_time": 10,
- "cpu_time": 10,
- "time_unit": "ns"
- },
- {
- "name": "Prefix/BM_One/3",
- "iterations": 1500,
- "real_time": 15,
- "cpu_time": 15,
- "time_unit": "ns"
- },
- {
- "name": "BM_Bye",
- "iterations": 5321,
- "real_time": 11,
- "cpu_time": 63,
- "time_unit": "ns"
- }
- ]
-}
diff --git a/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run0.json b/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run0.json
deleted file mode 100644
index 49f8b061437..00000000000
--- a/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run0.json
+++ /dev/null
@@ -1,65 +0,0 @@
-{
- "context": {
- "date": "2016-08-02 17:44:46",
- "num_cpus": 4,
- "mhz_per_cpu": 4228,
- "cpu_scaling_enabled": false,
- "library_build_type": "release"
- },
- "benchmarks": [
- {
- "name": "BM_One",
- "run_type": "aggregate",
- "iterations": 1000,
- "real_time": 10,
- "cpu_time": 100,
- "time_unit": "ns"
- },
- {
- "name": "BM_Two",
- "iterations": 1000,
- "real_time": 9,
- "cpu_time": 90,
- "time_unit": "ns"
- },
- {
- "name": "BM_Two",
- "iterations": 1000,
- "real_time": 8,
- "cpu_time": 86,
- "time_unit": "ns"
- },
- {
- "name": "short",
- "run_type": "aggregate",
- "iterations": 1000,
- "real_time": 8,
- "cpu_time": 80,
- "time_unit": "ns"
- },
- {
- "name": "short",
- "run_type": "aggregate",
- "iterations": 1000,
- "real_time": 8,
- "cpu_time": 77,
- "time_unit": "ns"
- },
- {
- "name": "medium",
- "run_type": "iteration",
- "iterations": 1000,
- "real_time": 8,
- "cpu_time": 80,
- "time_unit": "ns"
- },
- {
- "name": "medium",
- "run_type": "iteration",
- "iterations": 1000,
- "real_time": 9,
- "cpu_time": 82,
- "time_unit": "ns"
- }
- ]
-}
diff --git a/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run1.json b/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run1.json
deleted file mode 100644
index acc5ba17aed..00000000000
--- a/lib/libcxx/utils/google-benchmark/tools/gbench/Inputs/test3_run1.json
+++ /dev/null
@@ -1,65 +0,0 @@
-{
- "context": {
- "date": "2016-08-02 17:44:46",
- "num_cpus": 4,
- "mhz_per_cpu": 4228,
- "cpu_scaling_enabled": false,
- "library_build_type": "release"
- },
- "benchmarks": [
- {
- "name": "BM_One",
- "iterations": 1000,
- "real_time": 9,
- "cpu_time": 110,
- "time_unit": "ns"
- },
- {
- "name": "BM_Two",
- "run_type": "aggregate",
- "iterations": 1000,
- "real_time": 10,
- "cpu_time": 89,
- "time_unit": "ns"
- },
- {
- "name": "BM_Two",
- "iterations": 1000,
- "real_time": 7,
- "cpu_time": 72,
- "time_unit": "ns"
- },
- {
- "name": "short",
- "run_type": "aggregate",
- "iterations": 1000,
- "real_time": 7,
- "cpu_time": 75,
- "time_unit": "ns"
- },
- {
- "name": "short",
- "run_type": "aggregate",
- "iterations": 762,
- "real_time": 4.54,
- "cpu_time": 66.6,
- "time_unit": "ns"
- },
- {
- "name": "short",
- "run_type": "iteration",
- "iterations": 1000,
- "real_time": 800,
- "cpu_time": 1,
- "time_unit": "ns"
- },
- {
- "name": "medium",
- "run_type": "iteration",
- "iterations": 1200,
- "real_time": 5,
- "cpu_time": 53,
- "time_unit": "ns"
- }
- ]
-}
diff --git a/lib/libcxx/utils/google-benchmark/tools/gbench/__init__.py b/lib/libcxx/utils/google-benchmark/tools/gbench/__init__.py
deleted file mode 100644
index fce1a1acfbb..00000000000
--- a/lib/libcxx/utils/google-benchmark/tools/gbench/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-"""Google Benchmark tooling"""
-
-__author__ = 'Eric Fiselier'
-__email__ = 'eric@efcs.ca'
-__versioninfo__ = (0, 5, 0)
-__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'
-
-__all__ = []
diff --git a/lib/libcxx/utils/google-benchmark/tools/gbench/report.py b/lib/libcxx/utils/google-benchmark/tools/gbench/report.py
deleted file mode 100644
index 5085b931947..00000000000
--- a/lib/libcxx/utils/google-benchmark/tools/gbench/report.py
+++ /dev/null
@@ -1,522 +0,0 @@
-import unittest
-"""report.py - Utilities for reporting statistics about benchmark results
-"""
-import os
-import re
-import copy
-
-from scipy.stats import mannwhitneyu
-
-
-class BenchmarkColor(object):
- def __init__(self, name, code):
- self.name = name
- self.code = code
-
- def __repr__(self):
- return '%s%r' % (self.__class__.__name__,
- (self.name, self.code))
-
- def __format__(self, format):
- return self.code
-
-
-# Benchmark Colors Enumeration
-BC_NONE = BenchmarkColor('NONE', '')
-BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m')
-BC_CYAN = BenchmarkColor('CYAN', '\033[96m')
-BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m')
-BC_OKGREEN = BenchmarkColor('OKGREEN', '\033[32m')
-BC_HEADER = BenchmarkColor('HEADER', '\033[92m')
-BC_WARNING = BenchmarkColor('WARNING', '\033[93m')
-BC_WHITE = BenchmarkColor('WHITE', '\033[97m')
-BC_FAIL = BenchmarkColor('FAIL', '\033[91m')
-BC_ENDC = BenchmarkColor('ENDC', '\033[0m')
-BC_BOLD = BenchmarkColor('BOLD', '\033[1m')
-BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m')
-
-UTEST_MIN_REPETITIONS = 2
-UTEST_OPTIMAL_REPETITIONS = 9 # Lowest reasonable number, More is better.
-UTEST_COL_NAME = "_pvalue"
-
-
-def color_format(use_color, fmt_str, *args, **kwargs):
- """
- Return the result of 'fmt_str.format(*args, **kwargs)' after transforming
- 'args' and 'kwargs' according to the value of 'use_color'. If 'use_color'
- is False then all color codes in 'args' and 'kwargs' are replaced with
- the empty string.
- """
- assert use_color is True or use_color is False
- if not use_color:
- args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE
- for arg in args]
- kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE
- for key, arg in kwargs.items()}
- return fmt_str.format(*args, **kwargs)
-
-
-def find_longest_name(benchmark_list):
- """
- Return the length of the longest benchmark name in a given list of
- benchmark JSON objects
- """
- longest_name = 1
- for bc in benchmark_list:
- if len(bc['name']) > longest_name:
- longest_name = len(bc['name'])
- return longest_name
-
-
-def calculate_change(old_val, new_val):
- """
- Return a float representing the decimal change between old_val and new_val.
- """
- if old_val == 0 and new_val == 0:
- return 0.0
- if old_val == 0:
- return float(new_val - old_val) / (float(old_val + new_val) / 2)
- return float(new_val - old_val) / abs(old_val)
-
-
-def filter_benchmark(json_orig, family, replacement=""):
- """
- Apply a filter to the json, and only leave the 'family' of benchmarks.
- """
- regex = re.compile(family)
- filtered = {}
- filtered['benchmarks'] = []
- for be in json_orig['benchmarks']:
- if not regex.search(be['name']):
- continue
- filteredbench = copy.deepcopy(be) # Do NOT modify the old name!
- filteredbench['name'] = regex.sub(replacement, filteredbench['name'])
- filtered['benchmarks'].append(filteredbench)
- return filtered
-
-
-def get_unique_benchmark_names(json):
- """
- While *keeping* the order, give all the unique 'names' used for benchmarks.
- """
- seen = set()
- uniqued = [x['name'] for x in json['benchmarks']
- if x['name'] not in seen and
- (seen.add(x['name']) or True)]
- return uniqued
-
-
-def intersect(list1, list2):
- """
- Given two lists, get a new list consisting of the elements only contained
- in *both of the input lists*, while preserving the ordering.
- """
- return [x for x in list1 if x in list2]
-
-
-def partition_benchmarks(json1, json2):
- """
- While preserving the ordering, find benchmarks with the same names in
- both of the inputs, and group them.
- (i.e. partition/filter into groups with common name)
- """
- json1_unique_names = get_unique_benchmark_names(json1)
- json2_unique_names = get_unique_benchmark_names(json2)
- names = intersect(json1_unique_names, json2_unique_names)
- partitions = []
- for name in names:
- # Pick the time unit from the first entry of the lhs benchmark.
- time_unit = (x['time_unit']
- for x in json1['benchmarks'] if x['name'] == name).next()
- # Filter by name and time unit.
- lhs = [x for x in json1['benchmarks'] if x['name'] == name and
- x['time_unit'] == time_unit]
- rhs = [x for x in json2['benchmarks'] if x['name'] == name and
- x['time_unit'] == time_unit]
- partitions.append([lhs, rhs])
- return partitions
-
-
-def extract_field(partition, field_name):
- # The count of elements may be different. We want *all* of them.
- lhs = [x[field_name] for x in partition[0]]
- rhs = [x[field_name] for x in partition[1]]
- return [lhs, rhs]
-
-
-def print_utest(partition, utest_alpha, first_col_width, use_color=True):
- timings_time = extract_field(partition, 'real_time')
- timings_cpu = extract_field(partition, 'cpu_time')
-
- min_rep_cnt = min(len(timings_time[0]),
- len(timings_time[1]),
- len(timings_cpu[0]),
- len(timings_cpu[1]))
-
- # Does *everything* has at least UTEST_MIN_REPETITIONS repetitions?
- if min_rep_cnt < UTEST_MIN_REPETITIONS:
- return []
-
- def get_utest_color(pval):
- return BC_FAIL if pval >= utest_alpha else BC_OKGREEN
-
- time_pvalue = mannwhitneyu(
- timings_time[0], timings_time[1], alternative='two-sided').pvalue
- cpu_pvalue = mannwhitneyu(
- timings_cpu[0], timings_cpu[1], alternative='two-sided').pvalue
-
- dsc = "U Test, Repetitions: {} vs {}".format(
- len(timings_cpu[0]), len(timings_cpu[1]))
- dsc_color = BC_OKGREEN
-
- if min_rep_cnt < UTEST_OPTIMAL_REPETITIONS:
- dsc_color = BC_WARNING
- dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format(
- UTEST_OPTIMAL_REPETITIONS)
-
- special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}"
-
- last_name = partition[0][0]['name']
- return [color_format(use_color,
- special_str,
- BC_HEADER,
- "{}{}".format(last_name, UTEST_COL_NAME),
- first_col_width,
- get_utest_color(time_pvalue), time_pvalue,
- get_utest_color(cpu_pvalue), cpu_pvalue,
- dsc_color, dsc,
- endc=BC_ENDC)]
-
-
-def generate_difference_report(
- json1,
- json2,
- display_aggregates_only=False,
- utest=False,
- utest_alpha=0.05,
- use_color=True):
- """
- Calculate and report the difference between each test of two benchmarks
- runs specified as 'json1' and 'json2'.
- """
- assert utest is True or utest is False
- first_col_width = find_longest_name(json1['benchmarks'])
-
- def find_test(name):
- for b in json2['benchmarks']:
- if b['name'] == name:
- return b
- return None
-
- first_col_width = max(
- first_col_width,
- len('Benchmark'))
- first_col_width += len(UTEST_COL_NAME)
- first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format(
- 'Benchmark', 12 + first_col_width)
- output_strs = [first_line, '-' * len(first_line)]
-
- partitions = partition_benchmarks(json1, json2)
- for partition in partitions:
- # Careful, we may have different repetition count.
- for i in range(min(len(partition[0]), len(partition[1]))):
- bn = partition[0][i]
- other_bench = partition[1][i]
-
- # *If* we were asked to only display aggregates,
- # and if it is non-aggregate, then skip it.
- if display_aggregates_only and 'run_type' in bn and 'run_type' in other_bench:
- assert bn['run_type'] == other_bench['run_type']
- if bn['run_type'] != 'aggregate':
- continue
-
- fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
-
- def get_color(res):
- if res > 0.05:
- return BC_FAIL
- elif res > -0.07:
- return BC_WHITE
- else:
- return BC_CYAN
-
- tres = calculate_change(bn['real_time'], other_bench['real_time'])
- cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
- output_strs += [color_format(use_color,
- fmt_str,
- BC_HEADER,
- bn['name'],
- first_col_width,
- get_color(tres),
- tres,
- get_color(cpures),
- cpures,
- bn['real_time'],
- other_bench['real_time'],
- bn['cpu_time'],
- other_bench['cpu_time'],
- endc=BC_ENDC)]
-
- # After processing the whole partition, if requested, do the U test.
- if utest:
- output_strs += print_utest(partition,
- utest_alpha=utest_alpha,
- first_col_width=first_col_width,
- use_color=use_color)
-
- return output_strs
-
-
-###############################################################################
-# Unit tests
-
-
-class TestGetUniqueBenchmarkNames(unittest.TestCase):
- def load_results(self):
- import json
- testInputs = os.path.join(
- os.path.dirname(
- os.path.realpath(__file__)),
- 'Inputs')
- testOutput = os.path.join(testInputs, 'test3_run0.json')
- with open(testOutput, 'r') as f:
- json = json.load(f)
- return json
-
- def test_basic(self):
- expect_lines = [
- 'BM_One',
- 'BM_Two',
- 'short', # These two are not sorted
- 'medium', # These two are not sorted
- ]
- json = self.load_results()
- output_lines = get_unique_benchmark_names(json)
- print("\n")
- print("\n".join(output_lines))
- self.assertEqual(len(output_lines), len(expect_lines))
- for i in range(0, len(output_lines)):
- self.assertEqual(expect_lines[i], output_lines[i])
-
-
-class TestReportDifference(unittest.TestCase):
- def load_results(self):
- import json
- testInputs = os.path.join(
- os.path.dirname(
- os.path.realpath(__file__)),
- 'Inputs')
- testOutput1 = os.path.join(testInputs, 'test1_run1.json')
- testOutput2 = os.path.join(testInputs, 'test1_run2.json')
- with open(testOutput1, 'r') as f:
- json1 = json.load(f)
- with open(testOutput2, 'r') as f:
- json2 = json.load(f)
- return json1, json2
-
- def test_basic(self):
- expect_lines = [
- ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'],
- ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'],
- ['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'],
- ['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'],
- ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'],
- ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'],
- ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'],
- ['BM_100xSlower', '+99.0000', '+99.0000',
- '100', '10000', '100', '10000'],
- ['BM_100xFaster', '-0.9900', '-0.9900',
- '10000', '100', '10000', '100'],
- ['BM_10PercentCPUToTime', '+0.1000',
- '-0.1000', '100', '110', '100', '90'],
- ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
- ['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
- ]
- json1, json2 = self.load_results()
- output_lines_with_header = generate_difference_report(
- json1, json2, use_color=False)
- output_lines = output_lines_with_header[2:]
- print("\n")
- print("\n".join(output_lines_with_header))
- self.assertEqual(len(output_lines), len(expect_lines))
- for i in range(0, len(output_lines)):
- parts = [x for x in output_lines[i].split(' ') if x]
- self.assertEqual(len(parts), 7)
- self.assertEqual(expect_lines[i], parts)
-
-
-class TestReportDifferenceBetweenFamilies(unittest.TestCase):
- def load_result(self):
- import json
- testInputs = os.path.join(
- os.path.dirname(
- os.path.realpath(__file__)),
- 'Inputs')
- testOutput = os.path.join(testInputs, 'test2_run.json')
- with open(testOutput, 'r') as f:
- json = json.load(f)
- return json
-
- def test_basic(self):
- expect_lines = [
- ['.', '-0.5000', '-0.5000', '10', '5', '10', '5'],
- ['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'],
- ['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'],
- ['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'],
- ]
- json = self.load_result()
- json1 = filter_benchmark(json, "BM_Z.ro", ".")
- json2 = filter_benchmark(json, "BM_O.e", ".")
- output_lines_with_header = generate_difference_report(
- json1, json2, use_color=False)
- output_lines = output_lines_with_header[2:]
- print("\n")
- print("\n".join(output_lines_with_header))
- self.assertEqual(len(output_lines), len(expect_lines))
- for i in range(0, len(output_lines)):
- parts = [x for x in output_lines[i].split(' ') if x]
- self.assertEqual(len(parts), 7)
- self.assertEqual(expect_lines[i], parts)
-
-
-class TestReportDifferenceWithUTest(unittest.TestCase):
- def load_results(self):
- import json
- testInputs = os.path.join(
- os.path.dirname(
- os.path.realpath(__file__)),
- 'Inputs')
- testOutput1 = os.path.join(testInputs, 'test3_run0.json')
- testOutput2 = os.path.join(testInputs, 'test3_run1.json')
- with open(testOutput1, 'r') as f:
- json1 = json.load(f)
- with open(testOutput2, 'r') as f:
- json2 = json.load(f)
- return json1, json2
-
- def test_utest(self):
- expect_lines = []
- expect_lines = [
- ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
- ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
- ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
- ['BM_Two_pvalue',
- '0.6985',
- '0.6985',
- 'U',
- 'Test,',
- 'Repetitions:',
- '2',
- 'vs',
- '2.',
- 'WARNING:',
- 'Results',
- 'unreliable!',
- '9+',
- 'repetitions',
- 'recommended.'],
- ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
- ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
- ['short_pvalue',
- '0.7671',
- '0.1489',
- 'U',
- 'Test,',
- 'Repetitions:',
- '2',
- 'vs',
- '3.',
- 'WARNING:',
- 'Results',
- 'unreliable!',
- '9+',
- 'repetitions',
- 'recommended.'],
- ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'],
- ]
- json1, json2 = self.load_results()
- output_lines_with_header = generate_difference_report(
- json1, json2, utest=True, utest_alpha=0.05, use_color=False)
- output_lines = output_lines_with_header[2:]
- print("\n")
- print("\n".join(output_lines_with_header))
- self.assertEqual(len(output_lines), len(expect_lines))
- for i in range(0, len(output_lines)):
- parts = [x for x in output_lines[i].split(' ') if x]
- self.assertEqual(expect_lines[i], parts)
-
-
-class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
- unittest.TestCase):
- def load_results(self):
- import json
- testInputs = os.path.join(
- os.path.dirname(
- os.path.realpath(__file__)),
- 'Inputs')
- testOutput1 = os.path.join(testInputs, 'test3_run0.json')
- testOutput2 = os.path.join(testInputs, 'test3_run1.json')
- with open(testOutput1, 'r') as f:
- json1 = json.load(f)
- with open(testOutput2, 'r') as f:
- json2 = json.load(f)
- return json1, json2
-
- def test_utest(self):
- expect_lines = []
- expect_lines = [
- ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
- ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
- ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
- ['BM_Two_pvalue',
- '0.6985',
- '0.6985',
- 'U',
- 'Test,',
- 'Repetitions:',
- '2',
- 'vs',
- '2.',
- 'WARNING:',
- 'Results',
- 'unreliable!',
- '9+',
- 'repetitions',
- 'recommended.'],
- ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
- ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
- ['short_pvalue',
- '0.7671',
- '0.1489',
- 'U',
- 'Test,',
- 'Repetitions:',
- '2',
- 'vs',
- '3.',
- 'WARNING:',
- 'Results',
- 'unreliable!',
- '9+',
- 'repetitions',
- 'recommended.'],
- ]
- json1, json2 = self.load_results()
- output_lines_with_header = generate_difference_report(
- json1, json2, display_aggregates_only=True,
- utest=True, utest_alpha=0.05, use_color=False)
- output_lines = output_lines_with_header[2:]
- print("\n")
- print("\n".join(output_lines_with_header))
- self.assertEqual(len(output_lines), len(expect_lines))
- for i in range(0, len(output_lines)):
- parts = [x for x in output_lines[i].split(' ') if x]
- self.assertEqual(expect_lines[i], parts)
-
-
-if __name__ == '__main__':
- unittest.main()
-
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
-# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
-# kate: indent-mode python; remove-trailing-spaces modified;
diff --git a/lib/libcxx/utils/google-benchmark/tools/gbench/util.py b/lib/libcxx/utils/google-benchmark/tools/gbench/util.py
deleted file mode 100644
index 1f8e8e2c479..00000000000
--- a/lib/libcxx/utils/google-benchmark/tools/gbench/util.py
+++ /dev/null
@@ -1,164 +0,0 @@
-"""util.py - General utilities for running, loading, and processing benchmarks
-"""
-import json
-import os
-import tempfile
-import subprocess
-import sys
-
-# Input file type enumeration
-IT_Invalid = 0
-IT_JSON = 1
-IT_Executable = 2
-
-_num_magic_bytes = 2 if sys.platform.startswith('win') else 4
-
-
-def is_executable_file(filename):
- """
- Return 'True' if 'filename' names a valid file which is likely
- an executable. A file is considered an executable if it starts with the
- magic bytes for a EXE, Mach O, or ELF file.
- """
- if not os.path.isfile(filename):
- return False
- with open(filename, mode='rb') as f:
- magic_bytes = f.read(_num_magic_bytes)
- if sys.platform == 'darwin':
- return magic_bytes in [
- b'\xfe\xed\xfa\xce', # MH_MAGIC
- b'\xce\xfa\xed\xfe', # MH_CIGAM
- b'\xfe\xed\xfa\xcf', # MH_MAGIC_64
- b'\xcf\xfa\xed\xfe', # MH_CIGAM_64
- b'\xca\xfe\xba\xbe', # FAT_MAGIC
- b'\xbe\xba\xfe\xca' # FAT_CIGAM
- ]
- elif sys.platform.startswith('win'):
- return magic_bytes == b'MZ'
- else:
- return magic_bytes == b'\x7FELF'
-
-
-def is_json_file(filename):
- """
- Returns 'True' if 'filename' names a valid JSON output file.
- 'False' otherwise.
- """
- try:
- with open(filename, 'r') as f:
- json.load(f)
- return True
- except BaseException:
- pass
- return False
-
-
-def classify_input_file(filename):
- """
- Return a tuple (type, msg) where 'type' specifies the classified type
- of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable
- string represeting the error.
- """
- ftype = IT_Invalid
- err_msg = None
- if not os.path.exists(filename):
- err_msg = "'%s' does not exist" % filename
- elif not os.path.isfile(filename):
- err_msg = "'%s' does not name a file" % filename
- elif is_executable_file(filename):
- ftype = IT_Executable
- elif is_json_file(filename):
- ftype = IT_JSON
- else:
- err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename
- return ftype, err_msg
-
-
-def check_input_file(filename):
- """
- Classify the file named by 'filename' and return the classification.
- If the file is classified as 'IT_Invalid' print an error message and exit
- the program.
- """
- ftype, msg = classify_input_file(filename)
- if ftype == IT_Invalid:
- print("Invalid input file: %s" % msg)
- sys.exit(1)
- return ftype
-
-
-def find_benchmark_flag(prefix, benchmark_flags):
- """
- Search the specified list of flags for a flag matching `<prefix><arg>` and
- if it is found return the arg it specifies. If specified more than once the
- last value is returned. If the flag is not found None is returned.
- """
- assert prefix.startswith('--') and prefix.endswith('=')
- result = None
- for f in benchmark_flags:
- if f.startswith(prefix):
- result = f[len(prefix):]
- return result
-
-
-def remove_benchmark_flags(prefix, benchmark_flags):
- """
- Return a new list containing the specified benchmark_flags except those
- with the specified prefix.
- """
- assert prefix.startswith('--') and prefix.endswith('=')
- return [f for f in benchmark_flags if not f.startswith(prefix)]
-
-
-def load_benchmark_results(fname):
- """
- Read benchmark output from a file and return the JSON object.
- REQUIRES: 'fname' names a file containing JSON benchmark output.
- """
- with open(fname, 'r') as f:
- return json.load(f)
-
-
-def run_benchmark(exe_name, benchmark_flags):
- """
- Run a benchmark specified by 'exe_name' with the specified
- 'benchmark_flags'. The benchmark is run directly as a subprocess to preserve
- real time console output.
- RETURNS: A JSON object representing the benchmark output
- """
- output_name = find_benchmark_flag('--benchmark_out=',
- benchmark_flags)
- is_temp_output = False
- if output_name is None:
- is_temp_output = True
- thandle, output_name = tempfile.mkstemp()
- os.close(thandle)
- benchmark_flags = list(benchmark_flags) + \
- ['--benchmark_out=%s' % output_name]
-
- cmd = [exe_name] + benchmark_flags
- print("RUNNING: %s" % ' '.join(cmd))
- exitCode = subprocess.call(cmd)
- if exitCode != 0:
- print('TEST FAILED...')
- sys.exit(exitCode)
- json_res = load_benchmark_results(output_name)
- if is_temp_output:
- os.unlink(output_name)
- return json_res
-
-
-def run_or_load_benchmark(filename, benchmark_flags):
- """
- Get the results for a specified benchmark. If 'filename' specifies
- an executable benchmark then the results are generated by running the
- benchmark. Otherwise 'filename' must name a valid JSON output file,
- which is loaded and the result returned.
- """
- ftype = check_input_file(filename)
- if ftype == IT_JSON:
- return load_benchmark_results(filename)
- elif ftype == IT_Executable:
- return run_benchmark(filename, benchmark_flags)
- else:
- assert False # This branch is unreachable