Index: third_party/lit/tests/Inputs/test-data/lit.cfg
diff --git a/third_party/lit/tests/Inputs/test-data/lit.cfg b/third_party/lit/tests/Inputs/test-data/lit.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..f5aba7b217748838d9ceebde9b35f721882bfac5
--- /dev/null
+++ b/third_party/lit/tests/Inputs/test-data/lit.cfg
@@ -0,0 +1,52 @@
+import os
+try:
+    import ConfigParser
+except ImportError:
+    import configparser as ConfigParser
+
+# literal_eval parses the int/float literals this format supports without
+# executing arbitrary expressions the way eval() would.
+from ast import literal_eval
+
+import lit.formats
+import lit.Test
+
+class DummyFormat(lit.formats.FileBasedTest):
+    """Dummy lit test format: each "test" is an .ini dump of the result."""
+
+    def execute(self, test, lit_config):
+        # In this dummy format, expect that each test file is actually just a
+        # .ini format dump of the results to report.
+
+        source_path = test.getSourcePath()
+
+        cfg = ConfigParser.ConfigParser()
+        cfg.read(source_path)
+
+        # Create the basic test result from the [global] section.
+        result_code = cfg.get('global', 'result_code')
+        result_output = cfg.get('global', 'result_output')
+        result = lit.Test.Result(getattr(lit.Test, result_code),
+                                 result_output)
+
+        # Load additional metrics from the [results] section.
+        for key,value_str in cfg.items('results'):
+            # Was eval(): literal_eval accepts the same numeric literals
+            # but cannot execute code embedded in the test input file.
+            value = literal_eval(value_str)
+            if isinstance(value, int):
+                metric = lit.Test.IntMetricValue(value)
+            elif isinstance(value, float):
+                metric = lit.Test.RealMetricValue(value)
+            else:
+                raise RuntimeError("unsupported result type")
+            result.addMetric(key, metric)
+
+        return result
+
+config.name = 'test-data'
+config.suffixes = ['.ini']
+config.test_format = DummyFormat()
+config.test_source_root = None
+config.test_exec_root = None
+config.target_triple = None