| Index: dashboard/dashboard/pinpoint/models/test_config.py |
| diff --git a/dashboard/dashboard/pinpoint/models/test_config.py b/dashboard/dashboard/pinpoint/models/test_config.py |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..f4deaf10887e61b56de9821132496ff9c8951328 |
| --- /dev/null |
| +++ b/dashboard/dashboard/pinpoint/models/test_config.py |
| @@ -0,0 +1,103 @@ |
| +# Copyright 2017 The Chromium Authors. All rights reserved. |
| +# Use of this source code is governed by a BSD-style license that can be |
| +# found in the LICENSE file. |
| + |
| +import json |
| + |
| +from dashboard.pinpoint.models import quest as quest_module |
| + |
| + |
| +_ERROR_NO_BENCHMARK = 'Missing "benchmark" argument.' |
| + |
| + |
| +def TestConfig(target, request): |
| +  if not target: |
| +    return EmptyConfig() |
| + |
| +  if target in ('telemetry_perf_tests', 'telemetry_perf_webview_tests'): |
| +    return TelemetryConfig(request) |
| + |
| +  return GTestConfig() |
| + |
| + |
| +class EmptyConfig(object): |
| + |
| +  def Quests(self): |
| +    return () |
| + |
| +  def AsDict(self): |
| +    return {} |
| + |
| + |
| +class GTestConfig(object): |
| + |
| +  @property |
| +  def _arguments(self): |
| +    return [ |
| +        '--isolated-script-test-output=${ISOLATED_OUTDIR}/output.json', |
| +        '--isolated-script-test-chartjson-output=' |
| +        '${ISOLATED_OUTDIR}/chartjson-output.json', |
| +    ] |
| + |
| +  def Quests(self): |
| +    # TODO |
| +    return () |
| + |
| +  def AsDict(self): |
| +    return {} |
| + |
| + |
| +class TelemetryConfig(object): |
| + |
| +  def __init__(self, request): |
| +    self.dimensions = json.loads(request.get('dimensions')) |
| +    # TODO: Use the correct browser for Android and 64-bit Windows. |
| +    self.browser = 'release' |
| +    self.benchmark = request.get('benchmark') |
| +    self.story = request.get('story') |
| +    self.metric = request.get('metric') |
| +    self.repeat_count = int(request.get('repeat_count', 1)) |
| + |
| +    if not self.benchmark: |
| +      raise TypeError(_ERROR_NO_BENCHMARK) |
| + |
| +  @property |
| +  def _arguments(self):
|
perezju (2017/08/07 12:06:12): nit: not a strong opinion, but I would lean toward
dtu (2017/08/15 17:06:41): Done.
|
| +    arguments = [self.benchmark] |
| + |
| +    if self.story: |
| +      arguments += ('--story-filter', self.story) |
| + |
| +    if self.repeat_count != 1: |
| +      arguments += ('--pageset-repeat', str(self.repeat_count)) |
| + |
| +    arguments.append('--browser=' + self.browser) |
| + |
| +    arguments += ['-v', '--upload-results', '--output-format=chartjson'] |
| +    arguments += [ |
| +        '--isolated-script-test-output=${ISOLATED_OUTDIR}/output.json', |
| +        '--isolated-script-test-chartjson-output=' |
| +        '${ISOLATED_OUTDIR}/chartjson-output.json', |
| +    ] |
|
perezju (2017/08/07 12:06:12): Factor out these common args to a module level (pr
dtu (2017/08/15 17:06:41): It will make more sense in the follow-up :)
|
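A rough sketch of the refactor suggested above, which this CL defers to the follow-up: the shared isolated-script flags move into a module-level constant that both _arguments properties reuse. The name _ISOLATED_SCRIPT_ARGUMENTS and the tuple form are assumptions, not part of this change:

# Hypothetical module-level constant; the name is an assumption.
_ISOLATED_SCRIPT_ARGUMENTS = (
    '--isolated-script-test-output=${ISOLATED_OUTDIR}/output.json',
    '--isolated-script-test-chartjson-output='
    '${ISOLATED_OUTDIR}/chartjson-output.json',
)


class GTestConfig(object):

  @property
  def _arguments(self):
    # Reuse the shared flags instead of repeating the string literals here
    # and in TelemetryConfig._arguments.
    return list(_ISOLATED_SCRIPT_ARGUMENTS)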
| + |
| +    return arguments |
| + |
| +  def Quests(self): |
| +    quests = [] |
| + |
| +    quests.append(quest_module.RunTest(self.dimensions, self._arguments)) |
| + |
| +    if self.metric: |
| +      quests.append(quest_module.ReadChartJsonValue(self.metric, self.story)) |
| + |
| +    return quests |
| + |
| +  def AsDict(self): |
| +    return { |
| +        'dimensions': self.dimensions, |
| +        'browser': self.browser, |
| +        'benchmark': self.benchmark, |
| +        'story': self.story, |
| +        'metric': self.metric, |
| +        'repeat_count': self.repeat_count, |
| +    } |
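For reference, a minimal usage sketch of this module; the request dict, dimensions JSON, and benchmark/story/metric values below are made up for illustration (a plain dict works here because the configs only call request.get()):

from dashboard.pinpoint.models import test_config

# Hypothetical request values; not taken from this CL.
request = {
    'dimensions': '[{"key": "pool", "value": "Chrome-perf"}]',
    'benchmark': 'speedometer',
    'story': 'http://www.example.com',
    'metric': 'Total',
    'repeat_count': '10',
}

config = test_config.TestConfig('telemetry_perf_tests', request)

# A RunTest quest plus a ReadChartJsonValue quest, since a metric was given;
# with no metric, only RunTest is created.
quests = config.Quests()

# The Telemetry command line picks up --story-filter and --pageset-repeat
# because story and repeat_count were set.
print(config.AsDict())

An empty target yields EmptyConfig (no quests), and any non-Telemetry target currently falls through to the stub GTestConfig.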