summaryrefslogtreecommitdiffstats
path: root/gnu/llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py
diff options
context:
space:
mode:
authorpatrick <patrick@openbsd.org>2019-01-27 16:42:12 +0000
committerpatrick <patrick@openbsd.org>2019-01-27 16:42:12 +0000
commitb773203fb58f3ef282fb69c832d8710cab5bc82d (patch)
treee75913f147570fbd75169647b144df85b88a038c /gnu/llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py
parenttweak errno in previous (diff)
downloadwireguard-openbsd-b773203fb58f3ef282fb69c832d8710cab5bc82d.tar.xz
wireguard-openbsd-b773203fb58f3ef282fb69c832d8710cab5bc82d.zip
Import LLVM 7.0.1 release including clang, lld and lldb.
Diffstat (limited to 'gnu/llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py')
-rw-r--r--gnu/llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py52
1 file changed, 52 insertions, 0 deletions
diff --git a/gnu/llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py b/gnu/llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py
new file mode 100644
index 00000000000..5842f5a5ba3
--- /dev/null
+++ b/gnu/llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py
@@ -0,0 +1,52 @@
+import os
+try:
+ import ConfigParser
+except ImportError:
+ import configparser as ConfigParser
+
+import lit.formats
+import lit.Test
+
class DummyFormat(lit.formats.FileBasedTest):
    """Test format that reads canned results from .ini files.

    Instead of executing anything, each test file is a ConfigParser (.ini)
    dump of the result to report: a [global] section with the result code
    and output, a [results] section of metrics, and optional [micro-tests]
    / [micro-results] sections describing micro test results.
    """

    @staticmethod
    def _load_metrics(result, items):
        """Attach each (key, value_str) pair in *items* to *result* as a metric.

        Integer values become IntMetricValue, floats become RealMetricValue;
        any other evaluated type raises RuntimeError.
        """
        for key, value_str in items:
            # SECURITY NOTE: eval() on file contents is acceptable here
            # only because the inputs are trusted test fixtures checked
            # into the repository, never external data.
            value = eval(value_str)
            if isinstance(value, int):
                metric = lit.Test.IntMetricValue(value)
            elif isinstance(value, float):
                metric = lit.Test.RealMetricValue(value)
            else:
                raise RuntimeError("unsupported result type")
            result.addMetric(key, metric)

    def execute(self, test, lit_config):
        """Build a lit.Test.Result by parsing the test file as an .ini dump."""
        source_path = test.getSourcePath()

        cfg = ConfigParser.ConfigParser()
        cfg.read(source_path)

        # Create the basic test result from the [global] section.  The
        # result_code string (e.g. "PASS") is resolved to the matching
        # lit.Test attribute.
        result_code = cfg.get('global', 'result_code')
        result_output = cfg.get('global', 'result_output')
        result = lit.Test.Result(getattr(lit.Test, result_code),
                                 result_output)

        # Load additional metrics from the [results] section.
        self._load_metrics(result, cfg.items('results'))

        # Create micro test results.  Every micro test named in
        # [micro-tests] shares the same [micro-results] metrics.
        # (Fixed: the original reused `key` as both the outer and inner
        # loop variable, shadowing it.)
        for _, micro_name in cfg.items('micro-tests'):
            micro_result = lit.Test.Result(getattr(lit.Test, result_code, ''))
            self._load_metrics(micro_result, cfg.items('micro-results'))
            result.addMicroResult(micro_name, micro_result)

        return result