Chromium Code Reviews
| Index: build/android/pylib/perf/test_runner.py |
| diff --git a/build/android/pylib/perf/test_runner.py b/build/android/pylib/perf/test_runner.py |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..96e943f9113de9d70fc366d1612610ec9495e322 |
| --- /dev/null |
| +++ b/build/android/pylib/perf/test_runner.py |
| @@ -0,0 +1,142 @@ |
| +# Copyright 2013 The Chromium Authors. All rights reserved. |
| +# Use of this source code is governed by a BSD-style license that can be |
| +# found in the LICENSE file. |
| + |
| +"""Runs a perf test on a single device. |
| + |
| +Our buildbot infrastructure requires each slave to run steps serially. |
| +This is sub-optimal for Android, where these steps can run independently on |
| +multiple connected devices. |
| + |
| +The buildbots will run this script multiple times per cycle: |
| +- First: all steps listed in --steps will be executed in parallel using all |
| +connected devices. Step results will be pickled to disk. Each step has a unique |
| +name. The result code will be ignored if the step name is listed in |
| +--flaky_steps. |
frankf 2013/08/12 23:09:52: flaky_steps -> flaky-steps
bulach 2013/08/13 08:58:19: Done.
| +The buildbot will treat this step as a regular step, and will not process any |
| +graph data. |
| + |
| +- Then, with --print-step STEP_NAME: at this stage, we'll simply print the file |
| +with the step results previously saved. The buildbot will then process the graph |
| +data accordingly. |
| + |
| +The JSON steps file contains a dictionary in the format: |
| +{ |
| + "step_name_foo": "script_to_execute foo", |
| + "step_name_bar": "script_to_execute bar" |
| +} |
| + |
| +The JSON flaky steps file contains a list of step names whose results should |
| +be ignored: |
| +[ |
| + "step_name_foo", |
| + "step_name_bar" |
| +] |
| + |
| +Note that script_to_execute necessarily has to take at least the following |
| +options: |
| + --device: the serial number to be passed to all adb commands. |
| + --keep_test_server_ports: indicates it's being run as a shard, and shouldn't |
| + reset test server port allocation. |
| +""" |
| + |
| +import datetime |
| +import pexpect |
| +import pickle |
| +import os |
| +import sys |
| + |
| +from pylib import constants |
| +from pylib.base import base_test_result |
| +from pylib.base import base_test_runner |
| + |
| + |
| +_OUTPUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out', 'step_results') |
| + |
| + |
| +class TestPrinter(base_test_runner.BaseTestRunner): |
frankf 2013/08/12 23:09:52: docstring here and below
bulach 2013/08/13 08:58:19: Done.
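A one-line class docstring along the lines of the follow-up (the exact wording here is a guess) could be:

  class TestPrinter(base_test_runner.BaseTestRunner):
    """Replays and prints the pickled results of a previously executed step."""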
| + def __init__(self, test_options, device, shard_index): |
| + super(TestPrinter, self).__init__(device, None, 'Release') |
| + |
| + def RunTest(self, test_name): |
| + file_name = os.path.join(_OUTPUT_DIR, test_name) |
| + if not os.path.exists(file_name): |
| + print 'File not found ', file_name |
frankf 2013/08/12 23:09:52: Use logging instead of print?
bulach 2013/08/13 08:58:19: Done.
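The likely shape of the change, using the standard logging module (the exact message is a guess):

  import logging
  ...
  if not os.path.exists(file_name):
    logging.error('File not found %s', file_name)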
| + fail = base_test_result.ResultType.FAIL |
| + return base_test_result.TestRunResults(file_name, fail), False |
frankf 2013/08/12 23:09:52: file_name -> test_name?
frankf 2013/08/12 23:09:52: False -> None
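Applying both suggestions and following the result-building pattern used further down in this file, the early return might become (a sketch):

  results = base_test_result.TestRunResults()
  results.AddResult(base_test_result.BaseTestResult(
      test_name, base_test_result.ResultType.FAIL))
  return results, None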
| + |
| + with file(file_name, 'r') as f: |
| + persisted_result = pickle.loads(f.read()) |
| + print persisted_result['output'] |
| + |
| + results = base_test_result.TestRunResults() |
| + result = base_test_result.BaseTestResult(test_name, |
frankf 2013/08/12 23:09:52: You can get rid of intermediate var
bulach 2013/08/13 08:58:19: Done.
| + persisted_result['result_type']) |
| + results.AddResult(result) |
| + return results, False |
frankf 2013/08/12 23:09:52: None
bulach 2013/08/13 08:58:19: by moving this up to _RunPerfTests, this can retur
| + |
| + |
| +class TestRunner(base_test_runner.BaseTestRunner): |
| + """A TestRunner instance runs a perf test on a single device.""" |
| + |
| + def __init__(self, test_options, device, shard_index, tests, flaky_tests): |
frankf 2013/08/12 23:09:52: add a docstring with Args section
bulach 2013/08/13 08:58:19: Done.
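The requested docstring might take roughly this shape; the argument descriptions are inferred from the module docstring, not taken from the patch:

  def __init__(self, test_options, device, shard_index, tests, flaky_tests):
    """Constructor.

    Args:
      test_options: An options object from the command line.
      device: Device serial number on which to run the tests.
      shard_index: Index of the shard running this runner.
      tests: A dict mapping step names to the commands to execute.
      flaky_tests: A list of step names whose results should be ignored.
    """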
| + super(TestRunner, self).__init__(device, None, 'Release') |
| + self._options = test_options |
| + self._tests = tests |
| + self._flaky_tests = flaky_tests |
| + |
| + @staticmethod |
| + def _SaveResult(result): |
| + with file(os.path.join(_OUTPUT_DIR, result['name']), 'w') as f: |
| + f.write(pickle.dumps(result)) |
| + |
| + def _LaunchPerfTest(self, test_name): |
| + """Runs perf test. |
frankf 2013/08/12 23:09:52: a perf test
bulach 2013/08/13 08:58:19: Done.
| + |
frankf 2013/08/12 23:09:52: Args section
bulach 2013/08/13 08:58:19: Done.
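Combining both comments, the _LaunchPerfTest docstring could read (a sketch):

  def _LaunchPerfTest(self, test_name):
    """Runs a perf test.

    Args:
      test_name: The name of the perf test to run.

    Returns:
      A tuple containing (Output, base_test_result.ResultType).
    """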
| + Returns: |
| + A tuple containing (Output, base_test_result.ResultType) |
| + """ |
| + cmd = (self._tests[test_name] + ' --device ' + self.device + |
| + ' --keep_test_server_ports') |
frankf 2013/08/12 23:09:52: Use string formatting instead concat
bulach 2013/08/13 08:58:19: Done.
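With string formatting, the command construction presumably becomes something like:

  cmd = ('%s --device %s --keep_test_server_ports' %
         (self._tests[test_name], self.device))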
| + start_time = datetime.datetime.now() |
| + output, exit_code = pexpect.run( |
frankf 2013/08/12 23:09:52: one space before =
bulach 2013/08/13 08:58:19: Done.
| + cmd, cwd=os.path.abspath(constants.DIR_SOURCE_ROOT), |
| + withexitstatus=True, logfile=sys.stdout, timeout=1800, |
| + env=os.environ) |
| + end_time = datetime.datetime.now() |
| + result_type = base_test_result.ResultType.FAIL |
| + if exit_code == 0: |
| + result_type = base_test_result.ResultType.PASS |
| + if test_name in self._flaky_tests: |
| + exit_code = 0 |
frankf 2013/08/12 23:09:52: Do you want to at least log the real exit code?
bulach 2013/08/13 08:58:19: yeah, good point. there were a few other things bb
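One way to keep a record of the ignored failure (a sketch; the actual follow-up may differ):

  if test_name in self._flaky_tests:
    logging.warning('Ignoring failure (exit_code=%d) for flaky step %s',
                    exit_code, test_name)
    exit_code = 0
    result_type = base_test_result.ResultType.PASS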
| + result_type = base_test_result.ResultType.PASS |
| + |
| + persisted_result = { |
| + 'name': test_name, |
| + 'output': output, |
| + 'exit_code': exit_code, |
| + 'result_type': result_type, |
| + 'total_time': (end_time - start_time).seconds, |
| + 'device': self.device, |
| + } |
| + self._SaveResult(persisted_result) |
| + |
| + return (output, result_type) |
| + |
| + def RunTest(self, test_name): |
| + """Run a perf test on the device. |
| + |
| + Args: |
| + test_name: String to use for logging the test result. |
| + |
| + Returns: |
| + A tuple of (TestRunResults, retry). |
| + """ |
| + output, result_type = self._LaunchPerfTest(test_name) |
| + results = base_test_result.TestRunResults() |
| + result = base_test_result.BaseTestResult(test_name, result_type) |
frankf 2013/08/12 23:09:52: same here with intermediate var
bulach 2013/08/13 08:58:19: Done.
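Folding the intermediate variable into the call gives:

  results.AddResult(
      base_test_result.BaseTestResult(test_name, result_type))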
| + results.AddResult(result) |
| + retry = None |
| + if result_type != base_test_result.ResultType.PASS: |
frankf 2013/08/12 23:09:52: if not results.DidRunPass()
bulach 2013/08/13 08:58:19: Done.
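With the suggested helper (assuming TestRunResults.DidRunPass() exists, as the comment implies), the retry logic would read:

  retry = None
  if not results.DidRunPass():
    retry = test_name
  return results, retry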
| + retry = test_name |
| + return results, retry |