Index: dashboard/dashboard/pinpoint/models/test_config_test.py |
diff --git a/dashboard/dashboard/pinpoint/models/test_config_test.py b/dashboard/dashboard/pinpoint/models/test_config_test.py |
new file mode 100644 |
index 0000000000000000000000000000000000000000..c389aa4e12a55c3c6823d10a1e764e5359eecee2 |
--- /dev/null |
+++ b/dashboard/dashboard/pinpoint/models/test_config_test.py |
@@ -0,0 +1,119 @@ |
+# Copyright 2017 The Chromium Authors. All rights reserved. |
+# Use of this source code is governed by a BSD-style license that can be |
+# found in the LICENSE file. |
+ |
+import unittest |
+ |
+from dashboard.pinpoint.models import quest |
+from dashboard.pinpoint.models import test_config |
+ |
+ |
# Command line expected for the Telemetry run-test quest in the tests below.
# Must stay in sync with the argument list that the telemetry config builds.
_RUN_TEST_ARGUMENTS = [
    'benchmark_name',
    '--story-filter', 'story_name',
    '--pageset-repeat', '10',
    '--browser=release',
    '-v',
    '--upload-results',
    '--output-format=chartjson',
    '--isolated-script-test-output=${ISOLATED_OUTDIR}/output.json',
    ('--isolated-script-test-chartjson-output='
     '${ISOLATED_OUTDIR}/chartjson-output.json'),
]
+ |
+ |
class TestConfigTest(unittest.TestCase):
  """Tests that TestConfig() dispatches to the right config subclass."""

  def testTestConfig(self):
    # No test suite at all: expect the no-op EmptyConfig.
    config = test_config.TestConfig(None, None)
    self.assertIsInstance(config, test_config.EmptyConfig)

    # A Telemetry suite: expect a TelemetryConfig built from the request.
    request = {
        'dimensions': '{}',
        'benchmark': 'speedometer',
    }
    config = test_config.TestConfig('telemetry_perf_tests', request)
    self.assertIsInstance(config, test_config.TelemetryConfig)

    # A gtest-style suite: expect a GTestConfig.
    config = test_config.TestConfig('net_perftests', None)
    self.assertIsInstance(config, test_config.GTestConfig)
+ |
+ |
class EmptyConfigTest(unittest.TestCase):
  """Tests for the no-op EmptyConfig."""

  def testQuests(self):
    # An empty config has nothing to execute.
    self.assertEqual(test_config.EmptyConfig().Quests(), ())

  def testAsDict(self):
    # An empty config serializes to an empty dict.
    self.assertEqual(test_config.EmptyConfig().AsDict(), {})
+ |
+ |
class GTestConfigTest(unittest.TestCase):
  """Tests for GTestConfig (currently a stub with no quests or state)."""

  def testQuests(self):
    # A gtest config currently produces no quests.
    self.assertEqual(test_config.GTestConfig().Quests(), ())

  def testAsDict(self):
    # A gtest config currently serializes to an empty dict.
    self.assertEqual(test_config.GTestConfig().AsDict(), {})
+ |
+ |
class TelemetryConfigTest(unittest.TestCase):
  """Tests for TelemetryConfig argument validation, quests, and AsDict()."""

  def testMissingArguments(self):
    # Both 'dimensions' and 'benchmark' are required; each request below
    # omits at least one of them.
    incomplete_requests = (
        {},
        {'dimensions': '{}'},
        {'benchmark': 'speedometer'},
    )
    for request in incomplete_requests:
      with self.assertRaises(TypeError):
        test_config.TelemetryConfig(request)

  def testInvalidArguments(self):
    # 'dimensions' must be valid JSON.
    with self.assertRaises(ValueError):
      test_config.TelemetryConfig({
          'dimensions': 'invalid json',
          'benchmark': 'speedometer',
      })

    # 'repeat_count' must parse as a number.
    with self.assertRaises(ValueError):
      test_config.TelemetryConfig({
          'dimensions': '{}',
          'benchmark': 'speedometer',
          'repeat_count': 'not a number',
      })

  def testQuests(self):
    config = test_config.TelemetryConfig({
        'dimensions': '{"key": "value"}',
        'benchmark': 'benchmark_name',
        'story': 'story_name',
        'metric': 'metric_name',
        'repeat_count': '10',
    })

    # Expect one quest to run the test and one to read the chartjson metric.
    self.assertEqual(config.Quests(), [
        quest.RunTest({'key': 'value'}, _RUN_TEST_ARGUMENTS),
        quest.ReadChartJsonValue('metric_name', 'story_name'),
    ])

  def testAsDict(self):
    config = test_config.TelemetryConfig({
        'dimensions': '{"key": "value"}',
        'benchmark': 'page_cycler_v2_site_isolation.basic_oopif',
        'story': 'http://www.fifa.com/',
        'metric': 'pcv1-cold@@timeToFirstMeaningfulPaint_avg',
        'repeat_count': '10',
    })

    # Dimensions come back JSON-decoded and repeat_count as an int; the
    # browser is filled in by the config itself.
    self.assertEqual(config.AsDict(), {
        'dimensions': {'key': 'value'},
        'browser': 'release',
        'benchmark': 'page_cycler_v2_site_isolation.basic_oopif',
        'story': 'http://www.fifa.com/',
        'metric': 'pcv1-cold@@timeToFirstMeaningfulPaint_avg',
        'repeat_count': 10,
    })