| OLD | NEW |
| 1 # Copyright 2013 The Chromium Authors. All rights reserved. | 1 # Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 """Runs perf tests. | 5 """Runs perf tests. |
| 6 | 6 |
| 7 Our buildbot infrastructure requires each slave to run steps serially. | 7 Our buildbot infrastructure requires each slave to run steps serially. |
| 8 This is sub-optimal for android, where these steps can run independently on | 8 This is sub-optimal for android, where these steps can run independently on |
| 9 multiple connected devices. | 9 multiple connected devices. |
| 10 | 10 |
| (...skipping 48 matching lines...) |
| 59 import time | 59 import time |
| 60 | 60 |
| 61 from pylib import cmd_helper | 61 from pylib import cmd_helper |
| 62 from pylib import constants | 62 from pylib import constants |
| 63 from pylib import forwarder | 63 from pylib import forwarder |
| 64 from pylib.base import base_test_result | 64 from pylib.base import base_test_result |
| 65 from pylib.base import base_test_runner | 65 from pylib.base import base_test_runner |
| 66 from pylib.device import device_errors | 66 from pylib.device import device_errors |
| 67 | 67 |
| 68 | 68 |
| | 69 def GetPersistedResult(test_name): |
| | 70 file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name) |
| | 71 if not os.path.exists(file_name): |
| | 72 logging.error('File not found %s', file_name) |
| | 73 return None |
| | 74 |
| | 75 with file(file_name, 'r') as f: |
| | 76 return pickle.loads(f.read()) |
| | 77 |
| | 78 |
| 69 def OutputJsonList(json_input, json_output): | 79 def OutputJsonList(json_input, json_output): |
| 70 with file(json_input, 'r') as i: | 80 with file(json_input, 'r') as i: |
| 71 all_steps = json.load(i) | 81 all_steps = json.load(i) |
| 72 step_values = [{'test': k, 'device_affinity': v['device_affinity']} | 82 |
| 73 for k, v in all_steps['steps'].iteritems()] | 83 step_values = [] |
| | 84 for k, v in all_steps['steps'].iteritems(): |
| | 85 data = {'test': k, 'device_affinity': v['device_affinity']} |
| | 86 |
| | 87 persisted_result = GetPersistedResult(k) |
| | 88 if persisted_result: |
| | 89 data['total_time'] = persisted_result['total_time'] |
| | 90 step_values.append(data) |
| | 91 |
| 74 with file(json_output, 'w') as o: | 92 with file(json_output, 'w') as o: |
| 75 o.write(json.dumps(step_values)) | 93 o.write(json.dumps(step_values)) |
| 76 return 0 | 94 return 0 |
| 77 | 95 |
| 78 | 96 |
| 79 def PrintTestOutput(test_name, json_file_name=None): | 97 def PrintTestOutput(test_name, json_file_name=None): |
| 80 """Helper method to print the output of previously executed test_name. | 98 """Helper method to print the output of previously executed test_name. |
| 81 | 99 |
| 82 Args: | 100 Args: |
| 83 test_name: name of the test that has been previously executed. | 101 test_name: name of the test that has been previously executed. |
| 84 json_file_name: name of the file to output chartjson data to. | 102 json_file_name: name of the file to output chartjson data to. |
| 85 | 103 |
| 86 Returns: | 104 Returns: |
| 87 exit code generated by the test step. | 105 exit code generated by the test step. |
| 88 """ | 106 """ |
| 89 file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name) | 107 persisted_result = GetPersistedResult(test_name) |
| 90 if not os.path.exists(file_name): | 108 if not persisted_result: |
| 91 logging.error('File not found %s', file_name) | |
| 92 return 1 | 109 return 1 |
| 93 | |
| 94 with file(file_name, 'r') as f: | |
| 95 persisted_result = pickle.loads(f.read()) | |
| 96 logging.info('*' * 80) | 110 logging.info('*' * 80) |
| 97 logging.info('Output from:') | 111 logging.info('Output from:') |
| 98 logging.info(persisted_result['cmd']) | 112 logging.info(persisted_result['cmd']) |
| 99 logging.info('*' * 80) | 113 logging.info('*' * 80) |
| 100 print persisted_result['output'] | 114 print persisted_result['output'] |
| 101 | 115 |
| 102 if json_file_name: | 116 if json_file_name: |
| 103 with file(json_file_name, 'w') as f: | 117 with file(json_file_name, 'w') as f: |
| 104 f.write(persisted_result['chartjson']) | 118 f.write(persisted_result['chartjson']) |
| 105 | 119 |
| (...skipping 223 matching lines...) |
| 329 Returns: | 343 Returns: |
| 330 A tuple of (TestRunResults, retry). | 344 A tuple of (TestRunResults, retry). |
| 331 """ | 345 """ |
| 332 _, result_type = self._LaunchPerfTest(test_name) | 346 _, result_type = self._LaunchPerfTest(test_name) |
| 333 results = base_test_result.TestRunResults() | 347 results = base_test_result.TestRunResults() |
| 334 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) | 348 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) |
| 335 retry = None | 349 retry = None |
| 336 if not results.DidRunPass(): | 350 if not results.DidRunPass(): |
| 337 retry = test_name | 351 retry = test_name |
| 338 return results, retry | 352 return results, retry |
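Note on the data flow behind the new `total_time` field: `GetPersistedResult` loads the per-test pickle from `constants.PERF_OUTPUT_DIR`, and `OutputJsonList` now folds that result's `total_time` into each step entry next to `device_affinity`. Below is a minimal standalone sketch of that transformation (Python 2, matching the module's style). The temp directory, test name, command string, and timing value are hypothetical stand-ins, and the pickle layout (`cmd`, `output`, `chartjson`, `total_time`) is inferred from how `PrintTestOutput` reads it; this is an illustration of the shape of the data, not the module's actual implementation.

```python
import json
import os
import pickle
import tempfile

# Hypothetical stand-ins for constants.PERF_OUTPUT_DIR and a test name.
output_dir = tempfile.mkdtemp()
test_name = 'sunspider'

# Persist a result dict in the shape GetPersistedResult/PrintTestOutput expect.
persisted = {
    'cmd': 'tools/perf/run_benchmark sunspider',  # hypothetical command line
    'output': '...benchmark stdout...',
    'chartjson': '{}',
    'total_time': 42,  # hypothetical timing value
}
with open(os.path.join(output_dir, test_name), 'w') as f:
    f.write(pickle.dumps(persisted))

# The 'steps' layout that OutputJsonList reads from json_input.
all_steps = {'steps': {test_name: {'device_affinity': 0}}}

# The per-step transformation OutputJsonList now performs, inlined here.
step_values = []
for k, v in all_steps['steps'].iteritems():
    data = {'test': k, 'device_affinity': v['device_affinity']}
    with open(os.path.join(output_dir, k), 'r') as f:
        data['total_time'] = pickle.loads(f.read())['total_time']
    step_values.append(data)

print json.dumps(step_values)
# e.g. [{"test": "sunspider", "device_affinity": 0, "total_time": 42}]
```

With this shape, a step whose pickle file is missing simply omits `total_time`: `GetPersistedResult` logs the error and returns `None`, and the `if persisted_result:` guard in `OutputJsonList` skips the field while still emitting the step entry.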