OLD | NEW |
---|---|
1 # Copyright 2013 The Chromium Authors. All rights reserved. | 1 # Copyright 2013 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """Runs perf tests. | 5 """Runs perf tests. |
6 | 6 |
7 Our buildbot infrastructure requires each slave to run steps serially. | 7 Our buildbot infrastructure requires each slave to run steps serially. |
8 This is sub-optimal for android, where these steps can run independently on | 8 This is sub-optimal for android, where these steps can run independently on |
9 multiple connected devices. | 9 multiple connected devices. |
10 | 10 |
(...skipping 28 matching lines...) | |
39 Note that script_to_execute necessarily has to take at least the following | 39 Note that script_to_execute necessarily has to take at least the following |
40 option: | 40 option: |
41 --device: the serial number to be passed to all adb commands. | 41 --device: the serial number to be passed to all adb commands. |
42 """ | 42 """ |
43 | 43 |
44 import datetime | 44 import datetime |
45 import logging | 45 import logging |
46 import os | 46 import os |
47 import pickle | 47 import pickle |
48 import sys | 48 import sys |
49 import time | |
49 | 50 |
51 from pylib import android_commands | |
50 from pylib import constants | 52 from pylib import constants |
53 from pylib import forwarder | |
51 from pylib import pexpect | 54 from pylib import pexpect |
52 from pylib.base import base_test_result | 55 from pylib.base import base_test_result |
53 from pylib.base import base_test_runner | 56 from pylib.base import base_test_runner |
54 | 57 |
55 | 58 |
56 def PrintTestOutput(test_name): | 59 def PrintTestOutput(test_name): |
57 """Helper method to print the output of previously executed test_name. | 60 """Helper method to print the output of previously executed test_name. |
58 | 61 |
59 Args: | 62 Args: |
60 test_name: name of the test that has been previously executed. | 63 test_name: name of the test that has been previously executed. |
(...skipping 53 matching lines...) | |
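PrintTestOutput reads back the output of a test that already ran, and the pickle import suggests that output is persisted to disk between steps. A minimal sketch of what such persistence could look like; the cache path and helper names are assumptions, since the actual implementation sits in the elided lines:

```python
# Sketch under assumed names; not the elided implementation from this file.
import os
import pickle

_ASSUMED_RESULT_DIR = '/tmp/chrome_android_perf_results'  # assumed location

def _PersistTestOutput(test_name, output, result_type):
  """Saves a finished test's output so a later step can print it."""
  with open(os.path.join(_ASSUMED_RESULT_DIR, test_name), 'wb') as f:
    pickle.dump({'output': output, 'result_type': result_type}, f)

def _LoadTestOutput(test_name):
  """Loads whatever _PersistTestOutput saved for test_name."""
  with open(os.path.join(_ASSUMED_RESULT_DIR, test_name), 'rb') as f:
    return pickle.load(f)
```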
114 | 117 |
115 def _LaunchPerfTest(self, test_name): | 118 def _LaunchPerfTest(self, test_name): |
116 """Runs a perf test. | 119 """Runs a perf test. |
117 | 120 |
118 Args: | 121 Args: |
119 test_name: the name of the test to be executed. | 122 test_name: the name of the test to be executed. |
120 | 123 |
121 Returns: | 124 Returns: |
122 A tuple containing (Output, base_test_result.ResultType) | 125 A tuple containing (Output, base_test_result.ResultType) |
123 """ | 126 """ |
127 try: | |
128 logging.warning('Unmapping device ports') | |
129 forwarder.Forwarder.UnmapAllDevicePorts(self.adb) | |
130 time.sleep(5) | |
rmcilroy
2013/10/21 11:33:13
Is this necessary now we have --wait-for-device? I'
bulach
2013/10/21 15:30:13
Done.
| |
131 self.adb.KillAdbdDevice() | |
132 except Exception as e: | |
133 logging.error('Exception when tearing down device %s', e) | |
134 | |
124 cmd = ('%s --device %s' % | 135 cmd = ('%s --device %s' % |
125 (self._tests[test_name], self.device)) | 136 (self._tests[test_name], self.device)) |
126 logging.info('%s : %s', test_name, cmd) | 137 logging.info('%s : %s', test_name, cmd) |
127 start_time = datetime.datetime.now() | 138 start_time = datetime.datetime.now() |
128 | 139 |
129 timeout = 1800 | 140 timeout = 1800 |
130 if self._options.no_timeout: | 141 if self._options.no_timeout: |
131 timeout = None | 142 timeout = None |
132 full_cmd = cmd | 143 full_cmd = cmd |
133 if self._options.dry_run: | 144 if self._options.dry_run: |
(...skipping 43 matching lines...) | |
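The elided body of _LaunchPerfTest presumably executes full_cmd with the timeout computed above; pexpect is already imported for that purpose. A rough sketch of how such a launch could be wired up, not the CL's actual code (the PASS/FAIL mapping on the exit code is an assumption):

```python
# Rough sketch, not the elided implementation: run full_cmd under pexpect
# with the timeout chosen above and map the exit code to a result type.
output, exit_code = pexpect.run(
    full_cmd, timeout=timeout, withexitstatus=True, logfile=sys.stdout)
result_type = (base_test_result.ResultType.PASS if exit_code == 0
               else base_test_result.ResultType.FAIL)
elapsed = (datetime.datetime.now() - start_time).seconds
logging.info('%s : exit_code=%d in %d secs', test_name, exit_code, elapsed)
```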
177 Returns: | 188 Returns: |
178 A tuple of (TestRunResults, retry). | 189 A tuple of (TestRunResults, retry). |
179 """ | 190 """ |
180 output, result_type = self._LaunchPerfTest(test_name) | 191 output, result_type = self._LaunchPerfTest(test_name) |
181 results = base_test_result.TestRunResults() | 192 results = base_test_result.TestRunResults() |
182 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) | 193 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) |
183 retry = None | 194 retry = None |
184 if not results.DidRunPass(): | 195 if not results.DidRunPass(): |
185 retry = test_name | 196 retry = test_name |
186 return results, retry | 197 return results, retry |
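RunTest hands back the test name in retry whenever the run failed, leaving the retry policy to the caller. A hypothetical caller, for illustration only (the real dispatcher lives elsewhere in pylib):

```python
# Hypothetical caller: shows how the (results, retry) tuple is consumed.
def _RunWithRetries(runner, test_name, max_attempts=3):
  results = None
  for _ in xrange(max_attempts):
    results, retry = runner.RunTest(test_name)
    if retry is None:  # the test passed, stop retrying.
      break
  return results
```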