Index: dashboard/dashboard/pinpoint/models/quest/run_test.py
diff --git a/dashboard/dashboard/pinpoint/models/quest/run_test.py b/dashboard/dashboard/pinpoint/models/quest/run_test.py
index 40eaf3ede2396cd4bf0e2e9a06e7b45f401a482d..f128c3057fe0e06d5a8bc31a9bd1328906006e27 100644
--- a/dashboard/dashboard/pinpoint/models/quest/run_test.py
+++ b/dashboard/dashboard/pinpoint/models/quest/run_test.py
@@ -8,9 +8,6 @@ This is the only Quest/Execution where the Execution has a reference back to
 modify the Quest.
 """
-import json
-import os
-
 from dashboard.pinpoint.models.quest import execution as execution_module
 from dashboard.pinpoint.models.quest import quest
 from dashboard.services import swarming_service
@@ -21,19 +18,6 @@ class RunTestError(Exception):
   pass
-class UnknownConfigError(RunTestError):
-
-  def __init__(self, configuration):
-    self.configuration = configuration
-    super(UnknownConfigError, self).__init__(
-        'There are no swarming bots corresponding to config "%s".' %
-        self.configuration)
-
-  def __reduce__(self):
-    # http://stackoverflow.com/a/36342588
-    return UnknownConfigError, (self.configuration,)
-
-
 class SwarmingTaskError(RunTestError):
   def __init__(self, task_id, state):
@@ -64,30 +48,31 @@ class SwarmingTestError(RunTestError):
 class RunTest(quest.Quest):
-  def __init__(self, configuration, test_suite, test, repeat_count):
-    self._configuration = configuration
-    self._test_suite = test_suite
-    self._test = test
-    self._repeat_count = repeat_count
+  def __init__(self, dimensions, extra_args):
+    self._dimensions = dimensions
+    self._extra_args = extra_args
     # We want subsequent executions to use the same bot as the first one.
     self._first_execution = None
+  def __eq__(self, other):
+    return (isinstance(other, self.__class__) and
perezju
2017/08/07 12:06:12
nit: ditto
dtu
2017/08/15 17:06:41
Done.
+            self._dimensions == other._dimensions and
+            self._extra_args == other._extra_args and
+            self._first_execution == other._first_execution)
+
+
   def __str__(self):
-    if self._test:
-      running = '/'.join((self._test_suite, self._test))
-    else:
-      running = self._test_suite
-    return 'Run %s on %s' % (running, self._configuration)
+    return 'Test'
perezju
2017/08/07 12:06:12
Is there anything more useful we could return?
dtu
2017/08/15 17:06:41
Not sure if we need more with the current UI. We w
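
(Illustrative sketch, not part of this CL: since the quest no longer knows a
configuration name, the closest available summary for __str__ would be the
Swarming dimensions it holds.)

  def __str__(self):
    # Sketch only: formats each dimension like 'pool:Chrome-perf-pinpoint'.
    return 'Run test on [%s]' % ', '.join(
        '%s:%s' % (d['key'], d['value']) for d in self._dimensions)
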
   @property
   def retry_count(self):
     return 4
   def Start(self, isolate_hash):
-    execution = _RunTestExecution(self._configuration, self._test_suite,
-                                  self._test, self._repeat_count, isolate_hash,
-                                  first_execution=self._first_execution)
+    execution = _RunTestExecution(
+        self._dimensions, self._extra_args, isolate_hash,
+        first_execution=self._first_execution)
     if not self._first_execution:
       self._first_execution = execution
@@ -97,13 +82,11 @@ class RunTest(quest.Quest):
 class _RunTestExecution(execution_module.Execution):
-  def __init__(self, configuration, test_suite, test, repeat_count,
-               isolate_hash, first_execution=None):
+  def __init__(self, dimensions, extra_args, isolate_hash,
+               first_execution=None):
     super(_RunTestExecution, self).__init__()
-    self._configuration = configuration
-    self._test_suite = test_suite
-    self._test = test
-    self._repeat_count = repeat_count
+    self._dimensions = dimensions
+    self._extra_args = extra_args
     self._isolate_hash = isolate_hash
     self._first_execution = first_execution
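
Note, assumed from the dimensions logic in the next hunk: the first_execution
plumbing is what pins every repeat to the machine that ran the first
execution. A sketch, not code from this CL, of the dimensions each case
produces:

  first execution:  [{'key': 'pool', 'value': 'Chrome-perf-pinpoint'}] + self._dimensions
  later executions: [{'key': 'pool', 'value': 'Chrome-perf-pinpoint'},
                     {'key': 'id', 'value': first_execution.bot_id}]
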
@@ -150,55 +133,25 @@ class _RunTestExecution(execution_module.Execution):
     else:
       return
-    # TODO: Support non-Telemetry tests.
-    extra_args = [self._test_suite]
-    if self._test:
-      extra_args += ('--story-filter', self._test)
-    if self._repeat_count != 1:
-      extra_args += ('--pageset-repeat', str(self._repeat_count))
-    # TODO: Use the correct browser for Android and 64-bit Windows.
-    extra_args += [
-        '-v', '--upload-results',
-        '--output-format=chartjson', '--browser=release',
-        '--isolated-script-test-output=${ISOLATED_OUTDIR}/output.json',
-        '--isolated-script-test-chartjson-output='
-        '${ISOLATED_OUTDIR}/chartjson-output.json',
-    ]
-
     dimensions = [{'key': 'pool', 'value': 'Chrome-perf-pinpoint'}]
     if self._first_execution:
       dimensions.append({'key': 'id', 'value': self._first_execution.bot_id})
     else:
-      dimensions += _ConfigurationDimensions(self._configuration)
+      dimensions += self._dimensions
     body = {
-        'name': 'Pinpoint job on %s' % self._configuration,
+        'name': 'Pinpoint job',
         'user': 'Pinpoint',
         'priority': '100',
         'expiration_secs': '600',
         'properties': {
             'inputs_ref': {'isolated': self._isolate_hash},
-            'extra_args': extra_args,
+            'extra_args': self._extra_args,
             'dimensions': dimensions,
             'execution_timeout_secs': '3600',
             'io_timeout_secs': '3600',
         },
-        'tags': [
-            'configuration:' + self._configuration,
-        ],
     }
     response = swarming_service.Tasks().New(body)
     self._task_id = response['task_id']
-
-
-def _ConfigurationDimensions(configuration):
-  bot_dimensions_path = os.path.join(os.path.dirname(__file__),
-                                     'bot_dimensions.json')
-  with open(bot_dimensions_path) as bot_dimensions_file:
-    bot_dimensions = json.load(bot_dimensions_file)
-
-  if configuration not in bot_dimensions:
-    raise UnknownConfigError(configuration)
-
-  return bot_dimensions[configuration]
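
How a caller might construct the quest after this refactor: a sketch only,
reusing the Telemetry arguments from the removed block above. The benchmark,
story, repeat count, and dimension values below are hypothetical, not from
this CL.

extra_args = [
    'some_benchmark',                # hypothetical test_suite
    '--story-filter', 'some_story',  # hypothetical test
    '--pageset-repeat', '10',        # hypothetical repeat_count
    '-v', '--upload-results',
    '--output-format=chartjson', '--browser=release',
    '--isolated-script-test-output=${ISOLATED_OUTDIR}/output.json',
    '--isolated-script-test-chartjson-output='
    '${ISOLATED_OUTDIR}/chartjson-output.json',
]
dimensions = [{'key': 'os', 'value': 'Android'}]  # hypothetical values
quest = RunTest(dimensions, extra_args)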