Chromium Code Reviews| OLD | NEW |
|---|---|
| (Empty) | |
| 1 # Copyright 2013 The Chromium Authors. All rights reserved. | |
| 2 # Use of this source code is governed by a BSD-style license that can be | |
| 3 # found in the LICENSE file. | |
| 4 | |
| 5 """Runs a perf test on a single device. | |
| 6 | |
| 7 Our buildbot infrastructure requires each slave to run steps serially. | |
| 8 This is sub-optimal for android, where these steps can run independently on | |
| 9 multiple connected devices. | |
| 10 | |
| 11 The buildbots will run this script multiple times per cycle: | |
| 12 - First: all steps listed in --steps will be executed in parallel using all | |
| 13 connected devices. Step results will be pickled to disk. Each step has a unique | |
| 14 name. The result code will be ignored if the step name is listed in | |
| 15 --flaky_steps. | |
|
frankf
2013/08/12 23:09:52
flaky_steps -> flaky-steps
bulach
2013/08/13 08:58:19
Done.
| |
| 16 The buildbot will treat this step as a regular step, and will not process any | |
| 17 graph data. | |
| 18 | |
| 19 - Then, with --print-step STEP_NAME: at this stage, we'll simply print the file | |
| 20 with the step results previously saved. The buildbot will then process the graph | |
| 21 data accordingly. | |
| 22 | |
| 23 The JSON steps file contains a dictionary in the format: | |
| 24 { | |
| 25 "step_name_foo": "script_to_execute foo", | |
| 26 "step_name_bar": "script_to_execute bar" | |
| 27 } | |
| 28 | |
| 29 The JSON flaky steps file contains a list of step names whose results should | |
| 30 be ignored: | |
| 31 [ | |
| 32 "step_name_foo", | |
| 33 "step_name_bar" | |
| 34 ] | |
| 35 | |
| 36 Note that script_to_execute necessarily has to take at least the following | |
| 37 options: | |
| 38 --device: the serial number to be passed to all adb commands. | |
| 39 --keep_test_server_ports: indicates it's being run as a shard, and shouldn't | |
| 40 reset test server port allocation. | |
| 41 """ | |
| 42 | |
import datetime
import logging
import os
import pickle
import sys

import pexpect

from pylib import constants
from pylib.base import base_test_result
from pylib.base import base_test_runner
| 52 | |
| 53 | |
# Directory where each step pickles its result so that a later invocation
# with --print-step can replay it for the buildbot.
_OUTPUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out', 'step_results')
| 55 | |
| 56 | |
class TestPrinter(base_test_runner.BaseTestRunner):
  """Replays the persisted results of a previously executed perf step.

  Reads the pickled result saved by TestRunner under _OUTPUT_DIR and prints
  its output so the buildbot can process the graph data.
  """

  def __init__(self, test_options, device, shard_index):
    """Constructor.

    Args:
      test_options: An options object (unused, kept for a uniform interface).
      device: Device serial number passed to the base runner.
      shard_index: Index of this shard (unused).
    """
    super(TestPrinter, self).__init__(device, None, 'Release')

  def RunTest(self, test_name):
    """Prints the pickled results previously saved for |test_name|.

    Args:
      test_name: The name of the step whose persisted results are printed.

    Returns:
      A tuple of (TestRunResults, retry). retry is always None: replaying a
      result must not be retried, even on failure.
    """
    results = base_test_result.TestRunResults()
    file_name = os.path.join(_OUTPUT_DIR, test_name)
    if not os.path.exists(file_name):
      # Missing file means the step never ran (or saving failed); report FAIL.
      logging.error('File not found %s', file_name)
      results.AddResult(base_test_result.BaseTestResult(
          test_name, base_test_result.ResultType.FAIL))
      return results, None

    with open(file_name, 'r') as f:
      persisted_result = pickle.loads(f.read())
    # The raw output contains the graph data the buildbot parses.
    print(persisted_result['output'])

    results.AddResult(base_test_result.BaseTestResult(
        test_name, persisted_result['result_type']))
    return results, None
| 77 | |
| 78 | |
class TestRunner(base_test_runner.BaseTestRunner):
  """A TestRunner instance runs a perf test on a single device."""

  def __init__(self, test_options, device, shard_index, tests, flaky_tests):
    """Constructor.

    Args:
      test_options: An options object.
      device: Device serial number on which the steps are run.
      shard_index: Index of this shard (unused).
      tests: A dict mapping step names to the commands to execute.
      flaky_tests: A list of step names whose failures should be ignored.
    """
    super(TestRunner, self).__init__(device, None, 'Release')
    self._options = test_options
    self._tests = tests
    self._flaky_tests = flaky_tests

  @staticmethod
  def _SaveResult(result):
    """Pickles |result| to disk so --print-step can replay it later."""
    with open(os.path.join(_OUTPUT_DIR, result['name']), 'w') as f:
      f.write(pickle.dumps(result))

  def _LaunchPerfTest(self, test_name):
    """Runs a perf test.

    Args:
      test_name: The name of the step to execute; must be a key of the
          |tests| dict given to the constructor.

    Returns:
      A tuple containing (output, base_test_result.ResultType).
    """
    cmd = ('%s --device %s --keep_test_server_ports' %
           (self._tests[test_name], self.device))
    start_time = datetime.datetime.now()
    output, exit_code = pexpect.run(
        cmd, cwd=os.path.abspath(constants.DIR_SOURCE_ROOT),
        withexitstatus=True, logfile=sys.stdout, timeout=1800,
        env=os.environ)
    end_time = datetime.datetime.now()
    result_type = base_test_result.ResultType.FAIL
    if exit_code == 0:
      result_type = base_test_result.ResultType.PASS
    if test_name in self._flaky_tests:
      # Flaky steps always report success, but keep the real exit code in
      # the log for diagnosis.
      logging.warning('Ignoring exit code %s for flaky step %s',
                      exit_code, test_name)
      exit_code = 0
      result_type = base_test_result.ResultType.PASS

    persisted_result = {
        'name': test_name,
        'output': output,
        'exit_code': exit_code,
        'result_type': result_type,
        'total_time': (end_time - start_time).seconds,
        'device': self.device,
    }
    self._SaveResult(persisted_result)

    return (output, result_type)

  def RunTest(self, test_name):
    """Run a perf test on the device.

    Args:
      test_name: String to use for logging the test result.

    Returns:
      A tuple of (TestRunResults, retry), where retry is the test name if
      the step failed and should be retried, or None otherwise.
    """
    _, result_type = self._LaunchPerfTest(test_name)
    results = base_test_result.TestRunResults()
    results.AddResult(base_test_result.BaseTestResult(test_name, result_type))
    retry = None
    if not results.DidRunPass():
      retry = test_name
    return results, retry
| OLD | NEW |