Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright 2013 The Chromium Authors. All rights reserved. | 1 # Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 """Runs a perf test on a single device. | 5 """Runs a perf test on a single device. |
| 6 | 6 |
| 7 Our buildbot infrastructure requires each slave to run steps serially. | 7 Our buildbot infrastructure requires each slave to run steps serially. |
| 8 This is sub-optimal for android, where these steps can run independently on | 8 This is sub-optimal for android, where these steps can run independently on |
| 9 multiple connected devices. | 9 multiple connected devices. |
| 10 | 10 |
| (...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 65 Returns: | 65 Returns: |
| 66 exit code generated by the test step. | 66 exit code generated by the test step. |
| 67 """ | 67 """ |
| 68 file_name = os.path.join(_OUTPUT_DIR, test_name) | 68 file_name = os.path.join(_OUTPUT_DIR, test_name) |
| 69 if not os.path.exists(file_name): | 69 if not os.path.exists(file_name): |
| 70 logging.error('File not found %s', file_name) | 70 logging.error('File not found %s', file_name) |
| 71 return 1 | 71 return 1 |
| 72 | 72 |
| 73 with file(file_name, 'r') as f: | 73 with file(file_name, 'r') as f: |
| 74 persisted_result = pickle.loads(f.read()) | 74 persisted_result = pickle.loads(f.read()) |
| 75 print '*' * 80 | |
|
frankf
2013/08/22 17:41:12
Why not use logging?
bulach
2013/08/23 09:13:55
Done (the last one has to stay a print, as it was, since …) [rest of comment truncated in extraction]
| |
| 76 print 'Output from:' | |
| 77 print persisted_result['cmd'] | |
| 78 print '*' * 80 | |
| 75 print persisted_result['output'] | 79 print persisted_result['output'] |
| 76 | 80 |
| 77 return persisted_result['exit_code'] | 81 return persisted_result['exit_code'] |
| 78 | 82 |
| 79 | 83 |
| 80 class TestRunner(base_test_runner.BaseTestRunner): | 84 class TestRunner(base_test_runner.BaseTestRunner): |
| 81 def __init__(self, test_options, device, tests, flaky_tests): | 85 def __init__(self, test_options, device, tests, flaky_tests): |
| 82 """A TestRunner instance runs a perf test on a single device. | 86 """A TestRunner instance runs a perf test on a single device. |
| 83 | 87 |
| 84 Args: | 88 Args: |
| (...skipping 17 matching lines...) Expand all Loading... | |
| 102 | 106 |
| 103 Args: | 107 Args: |
| 104 test_name: the name of the test to be executed. | 108 test_name: the name of the test to be executed. |
| 105 | 109 |
| 106 Returns: | 110 Returns: |
| 107 A tuple containing (Output, base_test_result.ResultType) | 111 A tuple containing (Output, base_test_result.ResultType) |
| 108 """ | 112 """ |
| 109 cmd = ('%s --device %s --keep_test_server_ports' % | 113 cmd = ('%s --device %s --keep_test_server_ports' % |
| 110 (self._tests[test_name], self.device)) | 114 (self._tests[test_name], self.device)) |
| 111 start_time = datetime.datetime.now() | 115 start_time = datetime.datetime.now() |
| 112 output, exit_code = pexpect.run( | 116 output, exit_code = pexpect.run( |
|
frankf
2013/08/22 17:41:12
BTW, have you seen any issue with pexpect terminating? [comment truncated in extraction]
bulach
2013/08/23 09:13:55
Yeah, sometimes exit_code is None (as per the other …) [comment truncated in extraction]
bulach
2013/08/23 11:25:08
For completeness: unfortunately subprocess.Popen d… [comment truncated in extraction]
| |
| 113 cmd, cwd=os.path.abspath(constants.DIR_SOURCE_ROOT), | 117 cmd, cwd=os.path.abspath(constants.DIR_SOURCE_ROOT), |
| 114 withexitstatus=True, logfile=sys.stdout, timeout=1800, | 118 withexitstatus=True, logfile=sys.stdout, timeout=1800, |
| 115 env=os.environ) | 119 env=os.environ) |
| 116 end_time = datetime.datetime.now() | 120 end_time = datetime.datetime.now() |
| 117 logging.info('%s : exit_code=%d in %d secs at %s', | 121 logging.info('%s : exit_code=%d in %d secs at %s', |
| 118 test_name, exit_code, (end_time - start_time).seconds, | 122 test_name, exit_code, (end_time - start_time).seconds, |
| 119 self.device) | 123 self.device) |
| 120 result_type = base_test_result.ResultType.FAIL | 124 result_type = base_test_result.ResultType.FAIL |
| 121 if exit_code == 0: | 125 if exit_code == 0: |
| 122 result_type = base_test_result.ResultType.PASS | 126 result_type = base_test_result.ResultType.PASS |
| 123 if test_name in self._flaky_tests: | 127 if test_name in self._flaky_tests: |
| 124 exit_code = 0 | 128 exit_code = 0 |
| 125 result_type = base_test_result.ResultType.PASS | 129 result_type = base_test_result.ResultType.PASS |
| 126 | 130 |
| 127 persisted_result = { | 131 persisted_result = { |
| 128 'name': test_name, | 132 'name': test_name, |
| 129 'output': output, | 133 'output': output, |
| 130 'exit_code': exit_code, | 134 'exit_code': exit_code, |
| 131 'result_type': result_type, | 135 'result_type': result_type, |
| 132 'total_time': (end_time - start_time).seconds, | 136 'total_time': (end_time - start_time).seconds, |
| 133 'device': self.device, | 137 'device': self.device, |
| 138 'cmd': cmd, | |
| 134 } | 139 } |
| 135 self._SaveResult(persisted_result) | 140 self._SaveResult(persisted_result) |
| 136 | 141 |
| 137 return (output, result_type) | 142 return (output, result_type) |
| 138 | 143 |
| 139 def RunTest(self, test_name): | 144 def RunTest(self, test_name): |
| 140 """Run a perf test on the device. | 145 """Run a perf test on the device. |
| 141 | 146 |
| 142 Args: | 147 Args: |
| 143 test_name: String to use for logging the test result. | 148 test_name: String to use for logging the test result. |
| 144 | 149 |
| 145 Returns: | 150 Returns: |
| 146 A tuple of (TestRunResults, retry). | 151 A tuple of (TestRunResults, retry). |
| 147 """ | 152 """ |
| 148 output, result_type = self._LaunchPerfTest(test_name) | 153 output, result_type = self._LaunchPerfTest(test_name) |
| 149 results = base_test_result.TestRunResults() | 154 results = base_test_result.TestRunResults() |
| 150 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) | 155 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) |
| 151 retry = None | 156 retry = None |
| 152 if not results.DidRunPass(): | 157 if not results.DidRunPass(): |
| 153 retry = test_name | 158 retry = test_name |
| 154 return results, retry | 159 return results, retry |
| OLD | NEW |