OLD | NEW |
---|---|
1 # Copyright 2013 The Chromium Authors. All rights reserved. | 1 # Copyright 2013 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """Runs a perf test on a single device. | 5 """Runs a perf test on a single device. |
Dominik Grewe
2013/10/16 13:51:43
Is this still accurate? According to the comment b
bulach
2013/10/16 17:03:01
good point! fixed..
| |
6 | 6 |
7 Our buildbot infrastructure requires each slave to run steps serially. | 7 Our buildbot infrastructure requires each slave to run steps serially. |
8 This is sub-optimal for android, where these steps can run independently on | 8 This is sub-optimal for android, where these steps can run independently on |
9 multiple connected devices. | 9 multiple connected devices. |
10 | 10 |
11 The buildbots will run this script multiple times per cycle: | 11 The buildbots will run this script multiple times per cycle: |
12 - First: all steps listed in --steps will be executed in parallel using all | 12 - First: all steps listed in --steps will be executed in parallel using all |
13 connected devices. Step results will be pickled to disk. Each step has a unique | 13 connected devices. Step results will be pickled to disk. Each step has a unique |
14 name. The result code will be ignored if the step name is listed in | 14 name. The result code will be ignored if the step name is listed in |
15 --flaky-steps. | 15 --flaky-steps. |
(...skipping 24 matching lines...) | |
40 options: | 40 options: |
41 --device: the serial number to be passed to all adb commands. | 41 --device: the serial number to be passed to all adb commands. |
42 --keep_test_server_ports: indicates it's being run as a shard, and shouldn't | 42 --keep_test_server_ports: indicates it's being run as a shard, and shouldn't |
43 reset test server port allocation. | 43 reset test server port allocation. |
44 """ | 44 """ |
45 | 45 |
46 import datetime | 46 import datetime |
47 import logging | 47 import logging |
48 import pickle | 48 import pickle |
49 import os | 49 import os |
50 import re | |
Dominik Grewe
2013/10/16 13:51:43
Is this needed for anything?
bulach
2013/10/16 17:03:01
nope, removed..
| |
50 import sys | 51 import sys |
51 | 52 |
52 from pylib import constants | 53 from pylib import constants |
53 from pylib import pexpect | 54 from pylib import pexpect |
54 from pylib.base import base_test_result | 55 from pylib.base import base_test_result |
55 from pylib.base import base_test_runner | 56 from pylib.base import base_test_runner |
56 | 57 |
57 | 58 |
58 def PrintTestOutput(test_name): | 59 def PrintTestOutput(test_name): |
59 """Helper method to print the output of previously executed test_name. | 60 """Helper method to print the output of previously executed test_name. |
(...skipping 29 matching lines...) | |
89 device: Device to run the tests. | 90 device: Device to run the tests. |
90 tests: a dict mapping test_name to command. | 91 tests: a dict mapping test_name to command. |
91 flaky_tests: a list of flaky test_name. | 92 flaky_tests: a list of flaky test_name. |
92 """ | 93 """ |
93 super(TestRunner, self).__init__(device, None, 'Release') | 94 super(TestRunner, self).__init__(device, None, 'Release') |
94 self._options = test_options | 95 self._options = test_options |
95 self._tests = tests | 96 self._tests = tests |
96 self._flaky_tests = flaky_tests | 97 self._flaky_tests = flaky_tests |
97 | 98 |
98 @staticmethod | 99 @staticmethod |
100 def _IsBetter(result): | |
101 if result['actual_exit_code'] == 0: | |
102 return True | |
103 pickled = os.path.join(constants.PERF_OUTPUT_DIR, | |
104 result['name']) | |
105 if not os.path.exists(pickled): | |
106 return True | |
107 with file(pickled, 'r') as f: | |
108 previous = pickle.loads(f.read()) | |
109 return result['actual_exit_code'] < previous['actual_exit_code'] | |
110 | |
111 @staticmethod | |
99 def _SaveResult(result): | 112 def _SaveResult(result): |
100 with file(os.path.join(constants.PERF_OUTPUT_DIR, | 113 if TestRunner._IsBetter(result): |
101 result['name']), 'w') as f: | 114 with file(os.path.join(constants.PERF_OUTPUT_DIR, |
102 f.write(pickle.dumps(result)) | 115 result['name']), 'w') as f: |
116 f.write(pickle.dumps(result)) | |
103 | 117 |
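The _IsBetter/_SaveResult pair added above means a step's pickled result is only overwritten when the new run passed, when no previous result exists on disk, or when the new run's actual exit code is lower than the stored one, so the best attempt for each step survives retries. A minimal standalone sketch of that comparison, assuming the same dict layout:

    def is_better(new, old):
        # A passing run always wins; otherwise keep the lower exit code.
        if new['actual_exit_code'] == 0:
            return True
        if old is None:  # no previously pickled result for this step
            return True
        return new['actual_exit_code'] < old['actual_exit_code']

    assert is_better({'actual_exit_code': 0}, {'actual_exit_code': 2})
    assert not is_better({'actual_exit_code': 3}, {'actual_exit_code': 1})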
104 def _LaunchPerfTest(self, test_name): | 118 def _LaunchPerfTest(self, test_name): |
105 """Runs a perf test. | 119 """Runs a perf test. |
106 | 120 |
107 Args: | 121 Args: |
108 test_name: the name of the test to be executed. | 122 test_name: the name of the test to be executed. |
109 | 123 |
110 Returns: | 124 Returns: |
111 A tuple containing (Output, base_test_result.ResultType) | 125 A tuple containing (Output, base_test_result.ResultType) |
112 """ | 126 """ |
(...skipping 15 matching lines...) | |
128 env=os.environ) | 142 env=os.environ) |
129 end_time = datetime.datetime.now() | 143 end_time = datetime.datetime.now() |
130 if exit_code is None: | 144 if exit_code is None: |
131 exit_code = -1 | 145 exit_code = -1 |
132 logging.info('%s : exit_code=%d in %d secs at %s', | 146 logging.info('%s : exit_code=%d in %d secs at %s', |
133 test_name, exit_code, (end_time - start_time).seconds, | 147 test_name, exit_code, (end_time - start_time).seconds, |
134 self.device) | 148 self.device) |
135 result_type = base_test_result.ResultType.FAIL | 149 result_type = base_test_result.ResultType.FAIL |
136 if exit_code == 0: | 150 if exit_code == 0: |
137 result_type = base_test_result.ResultType.PASS | 151 result_type = base_test_result.ResultType.PASS |
152 actual_exit_code = exit_code | |
138 if test_name in self._flaky_tests: | 153 if test_name in self._flaky_tests: |
139 # The exit_code is used at the second stage when printing the | 154 # The exit_code is used at the second stage when printing the |
139 # test output. If the test is flaky, force it to "0" to get that step green | 154 # test output. If the test is flaky, force it to "0" to get that step green |
140 # whilst still gathering data for the perf dashboards. | 155 # whilst still gathering data for the perf dashboards. |
142 # The result_type is used by the test_dispatcher to retry the test. | 157 # The result_type is used by the test_dispatcher to retry the test. |
143 exit_code = 0 | 158 exit_code = 0 |
144 | 159 |
145 persisted_result = { | 160 persisted_result = { |
146 'name': test_name, | 161 'name': test_name, |
147 'output': output, | 162 'output': output, |
148 'exit_code': exit_code, | 163 'exit_code': exit_code, |
164 'actual_exit_code': actual_exit_code, | |
149 'result_type': result_type, | 165 'result_type': result_type, |
150 'total_time': (end_time - start_time).seconds, | 166 'total_time': (end_time - start_time).seconds, |
151 'device': self.device, | 167 'device': self.device, |
152 'cmd': cmd, | 168 'cmd': cmd, |
153 } | 169 } |
154 self._SaveResult(persisted_result) | 170 self._SaveResult(persisted_result) |
155 | 171 |
156 return (output, result_type) | 172 return (output, result_type) |
157 | 173 |
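For flaky tests the two exit codes deliberately diverge: exit_code (what the later printing stage reports to the buildbot) is forced to 0 so the step stays green, actual_exit_code keeps the real value for the _IsBetter comparison, and result_type stays FAIL so the dispatcher can still retry. A toy persisted_result for a flaky test that exited with status 1 (values made up for illustration):

    persisted_result = {
        'name': 'some_flaky_step',     # hypothetical step name
        'exit_code': 0,                # forced to 0: keeps the reporting step green
        'actual_exit_code': 1,         # real exit status, used by _IsBetter
        'result_type': 'FAIL',         # stands in for base_test_result.ResultType.FAIL
    }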
158 def RunTest(self, test_name): | 174 def RunTest(self, test_name): |
159 """Run a perf test on the device. | 175 """Run a perf test on the device. |
160 | 176 |
161 Args: | 177 Args: |
162 test_name: String to use for logging the test result. | 178 test_name: String to use for logging the test result. |
163 | 179 |
164 Returns: | 180 Returns: |
165 A tuple of (TestRunResults, retry). | 181 A tuple of (TestRunResults, retry). |
166 """ | 182 """ |
167 output, result_type = self._LaunchPerfTest(test_name) | 183 output, result_type = self._LaunchPerfTest(test_name) |
168 results = base_test_result.TestRunResults() | 184 results = base_test_result.TestRunResults() |
169 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) | 185 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) |
170 retry = None | 186 retry = None |
171 if not results.DidRunPass(): | 187 if not results.DidRunPass(): |
172 retry = test_name | 188 retry = test_name |
173 return results, retry | 189 return results, retry |
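RunTest wraps the single outcome in a TestRunResults object and, when the run did not pass, hands the test name back as retry so the caller can schedule another attempt. A hedged sketch of how a caller might consume that tuple (construction of the runner is elided here because its signature is not shown in this diff):

    results, retry = runner.RunTest('page_cycler.example')  # runner: a TestRunner instance
    if retry is not None:
        # The step failed; the dispatcher may re-run it, possibly on another device.
        results, retry = runner.RunTest(retry)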
OLD | NEW |