Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # | 2 # |
| 3 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 3 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 4 # Use of this source code is governed by a BSD-style license that can be | 4 # Use of this source code is governed by a BSD-style license that can be |
| 5 # found in the LICENSE file. | 5 # found in the LICENSE file. |
| 6 | 6 |
| 7 """Helper script to shard build bot steps and save results to disk. | 7 """Helper script to shard build bot steps and save results to disk. |
| 8 | 8 |
| 9 Our buildbot infrastructure requires each slave to run steps serially. | 9 Our buildbot infrastructure requires each slave to run steps serially. |
| 10 This is sub-optimal for android, where these steps can run independently on | 10 This is sub-optimal for android, where these steps can run independently on |
| 11 multiple connected devices. | 11 multiple connected devices. |
| 12 | 12 |
| 13 The buildbots will run this script multiple times per cycle: | 13 The buildbots will run this script multiple times per cycle: |
| 14 - First, without params: all steps will be executed in parallel using all | 14 - First: all steps listed in -s will be executed in parallel using all |
| 15 connected devices. Step results will be pickled to disk (each step has a unique | 15 connected devices. Step results will be pickled to disk. Each step has a unique |
| 16 name). | 16 name. The result code will be ignored if the step name is listed in |
| 17 --flaky_steps. | |
| 17 The buildbot will treat this step as a regular step, and will not process any | 18 The buildbot will treat this step as a regular step, and will not process any |
| 18 graph data. | 19 graph data. |
| 19 | 20 |
| 20 - Then, with -p STEP_NAME: at this stage, we'll simply print the file with the | 21 - Then, with -p STEP_NAME: at this stage, we'll simply print the file with the |
| 21 step results previously saved. The buildbot will then process the graph data | 22 step results previously saved. The buildbot will then process the graph data |
| 22 accordingly. | 23 accordingly. |
| 23 | 24 |
| 24 The JSON config contains is a file containing a dictionary in the format: | 25 The JSON steps file contains a dictionary in the format: |
| 25 { | 26 { |
| 26 'step_name_foo': 'script_to_execute foo', | 27 "step_name_foo": "script_to_execute foo", |
| 27 'step_name_bar': 'script_to_execute bar' | 28 "step_name_bar": "script_to_execute bar" |
| 28 } | 29 } |
| 29 | 30 |
| 31 The JSON flaky steps file contains a list with step names which results should | |
| 32 be ignored: | |
| 33 [ | |
| 34 "step_name_foo", | |
| 35 "step_name_bar" | |
| 36 ] | |
| 37 | |
| 30 Note that script_to_execute must take at least the following | 38 Note that script_to_execute must take at least the following |
| 31 options: | 39 options: |
| 32 --device: the serial number to be passed to all adb commands. | 40 --device: the serial number to be passed to all adb commands. |
| 33 --keep_test_server_ports: indicates it's being run as a shard, and shouldn't | 41 --keep_test_server_ports: indicates it's being run as a shard, and shouldn't |
| 34 reset test server port allocation. | 42 reset test server port allocation. |
| 35 """ | 43 """ |
| 36 | 44 |
| 37 | 45 |
| 38 import datetime | 46 import datetime |
| 39 import json | 47 import json |
| (...skipping 24 matching lines...) | |
| 64 def _RunStepsPerDevice(steps): | 72 def _RunStepsPerDevice(steps): |
| 65 results = [] | 73 results = [] |
| 66 for step in steps: | 74 for step in steps: |
| 67 start_time = datetime.datetime.now() | 75 start_time = datetime.datetime.now() |
| 68 print 'Starting %s: %s %s at %s' % (step['name'], step['cmd'], | 76 print 'Starting %s: %s %s at %s' % (step['name'], step['cmd'], |
| 69 start_time, step['device']) | 77 start_time, step['device']) |
| 70 output, exit_code = pexpect.run( | 78 output, exit_code = pexpect.run( |
| 71 step['cmd'], cwd=os.path.abspath(constants.CHROME_DIR), | 79 step['cmd'], cwd=os.path.abspath(constants.CHROME_DIR), |
| 72 withexitstatus=True, logfile=sys.stdout, timeout=1800, | 80 withexitstatus=True, logfile=sys.stdout, timeout=1800, |
| 73 env=os.environ) | 81 env=os.environ) |
| 82 exit_code = exit_code or 0 | |
| 74 end_time = datetime.datetime.now() | 83 end_time = datetime.datetime.now() |
| 75 print 'Finished %s: %s %s at %s' % (step['name'], step['cmd'], | 84 exit_msg = '%s %s' % (exit_code, |
| 76 end_time, step['device']) | 85 '(ignored, flaky step)' if step['is_flaky'] else '') |
| 86 print 'Finished %s: %s %s %s at %s' % (step['name'], exit_msg, step['cmd'], | |
| 87 end_time, step['device']) | |
| 88 if step['is_flaky']: | |
| 89 exit_code = 0 | |
| 77 result = {'name': step['name'], | 90 result = {'name': step['name'], |
| 78 'output': output, | 91 'output': output, |
| 79 'exit_code': exit_code or 0, | 92 'exit_code': exit_code or 0, |
Sami (2013/02/26 17:19:13): No need for the "or 0" anymore :)
| 80 'total_time': (end_time - start_time).seconds, | 93 'total_time': (end_time - start_time).seconds, |
| 81 'device': step['device']} | 94 'device': step['device']} |
| 82 _SaveResult(result) | 95 _SaveResult(result) |
| 83 results += [result] | 96 results += [result] |
| 84 return results | 97 return results |
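`_SaveResult` and `_PrintStepOutput` fall inside the elided lines above, but the docstring says each step's result dict is pickled to disk under its unique name and replayed later with `-p`. A minimal sketch of that round trip, assuming one pickle file per step under `_OUTPUT_DIR` (the path below is a placeholder; the real constant is defined in the skipped imports):

```python
import os
import pickle

_OUTPUT_DIR = 'out/step_results'  # assumed location for illustration only


def _SaveResult(result):
  # One pickle file per step, keyed by the step's unique name.
  with open(os.path.join(_OUTPUT_DIR, result['name']), 'wb') as f:
    pickle.dump(result, f)


def _PrintStepOutput(step_name):
  # Replay the stored output so the buildbot can process graph data,
  # and propagate the step's original exit code.
  with open(os.path.join(_OUTPUT_DIR, step_name), 'rb') as f:
    result = pickle.load(f)
  print(result['output'])
  return result['exit_code']
```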
| 85 | 98 |
| 86 | 99 |
| 87 def _RunShardedSteps(steps, devices): | 100 def _RunShardedSteps(steps, flaky_steps, devices): |
| 88 assert steps | 101 assert steps |
| 89 assert devices, 'No devices connected?' | 102 assert devices, 'No devices connected?' |
| 90 if os.path.exists(_OUTPUT_DIR): | 103 if os.path.exists(_OUTPUT_DIR): |
| 91 assert '/step_results' in _OUTPUT_DIR | 104 assert '/step_results' in _OUTPUT_DIR |
| 92 shutil.rmtree(_OUTPUT_DIR) | 105 shutil.rmtree(_OUTPUT_DIR) |
| 93 if not os.path.exists(_OUTPUT_DIR): | 106 if not os.path.exists(_OUTPUT_DIR): |
| 94 os.makedirs(_OUTPUT_DIR) | 107 os.makedirs(_OUTPUT_DIR) |
| 95 step_names = sorted(steps.keys()) | 108 step_names = sorted(steps.keys()) |
| 96 all_params = [] | 109 all_params = [] |
| 97 num_devices = len(devices) | 110 num_devices = len(devices) |
| 98 shard_size = (len(steps) + num_devices - 1) / num_devices | 111 shard_size = (len(steps) + num_devices - 1) / num_devices |
| 99 for i, device in enumerate(devices): | 112 for i, device in enumerate(devices): |
| 100 steps_per_device = [] | 113 steps_per_device = [] |
| 101 for s in steps.keys()[i * shard_size:(i + 1) * shard_size]: | 114 for s in steps.keys()[i * shard_size:(i + 1) * shard_size]: |
| 102 steps_per_device += [{'name': s, | 115 steps_per_device += [{'name': s, |
| 103 'device': device, | 116 'device': device, |
| 117 'is_flaky': s in flaky_steps, | |
| 104 'cmd': steps[s] + ' --device ' + device + | 118 'cmd': steps[s] + ' --device ' + device + |
| 105 ' --keep_test_server_ports'}] | 119 ' --keep_test_server_ports'}] |
| 106 all_params += [steps_per_device] | 120 all_params += [steps_per_device] |
| 107 print 'Start sharding (note: output is not synchronized...)' | 121 print 'Start sharding (note: output is not synchronized...)' |
| 108 print '*' * 80 | 122 print '*' * 80 |
| 109 start_time = datetime.datetime.now() | 123 start_time = datetime.datetime.now() |
| 110 pool = multiprocessing.Pool(processes=num_devices) | 124 pool = multiprocessing.Pool(processes=num_devices) |
| 111 async_results = pool.map_async(_RunStepsPerDevice, all_params) | 125 async_results = pool.map_async(_RunStepsPerDevice, all_params) |
| 112 results_per_device = async_results.get(999999) | 126 results_per_device = async_results.get(999999) |
| 113 end_time = datetime.datetime.now() | 127 end_time = datetime.datetime.now() |
| (...skipping 36 matching lines...) | |
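The sharding above is plain ceiling division: with N steps and D devices, each device gets a contiguous slice of at most ceil(N/D) step names. A standalone sketch of the same partitioning (step and device names invented for illustration):

```python
def _Shard(step_names, devices):
  # Each device gets at most ceil(len(step_names) / len(devices)) steps.
  num_devices = len(devices)
  shard_size = (len(step_names) + num_devices - 1) // num_devices
  return [step_names[i * shard_size:(i + 1) * shard_size]
          for i in range(num_devices)]

# 5 steps over 2 devices -> shard_size 3 -> [['a', 'b', 'c'], ['d', 'e']]
print(_Shard(['a', 'b', 'c', 'd', 'e'], ['device1', 'device2']))
```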
| 150 os.kill(int(pid), signal.SIGQUIT) | 164 os.kill(int(pid), signal.SIGQUIT) |
| 151 except Exception as e: | 165 except Exception as e: |
| 152 logging.warning('Failed killing %s %s %s', server, pid, e) | 166 logging.warning('Failed killing %s %s %s', server, pid, e) |
| 153 | 167 |
| 154 | 168 |
| 155 def main(argv): | 169 def main(argv): |
| 156 parser = optparse.OptionParser() | 170 parser = optparse.OptionParser() |
| 157 parser.add_option('-s', '--steps', | 171 parser.add_option('-s', '--steps', |
| 158 help='A JSON file containing all the steps to be ' | 172 help='A JSON file containing all the steps to be ' |
| 159 'sharded.') | 173 'sharded.') |
| 174 parser.add_option('--flaky_steps', | |
| 175 help='A JSON file containing steps that are flaky and ' | |
| 176 'will have their exit codes ignored.') |
| 160 parser.add_option('-p', '--print_results', | 177 parser.add_option('-p', '--print_results', |
| 161 help='Only prints the results for the previously ' | 178 help='Only prints the results for the previously ' |
| 162 'executed step, do not run it again.') | 179 'executed step, do not run it again.') |
| 163 options, urls = parser.parse_args(argv) | 180 options, urls = parser.parse_args(argv) |
| 164 if options.print_results: | 181 if options.print_results: |
| 165 return _PrintStepOutput(options.print_results) | 182 return _PrintStepOutput(options.print_results) |
| 166 | 183 |
| 167 # At this point, we should kill everything that may have been left over from | 184 # At this point, we should kill everything that may have been left over from |
| 168 # previous runs. | 185 # previous runs. |
| 169 _KillPendingServers() | 186 _KillPendingServers() |
| 170 | 187 |
| 171 # Reset the test port allocation. It's important to do it before starting | 188 # Reset the test port allocation. It's important to do it before starting |
| 172 # to dispatch any step. | 189 # to dispatch any step. |
| 173 if not ports.ResetTestServerPortAllocation(): | 190 if not ports.ResetTestServerPortAllocation(): |
| 174 raise Exception('Failed to reset test server port.') | 191 raise Exception('Failed to reset test server port.') |
| 175 | 192 |
| 176 # Sort the devices so that we'll try to always run a step in the same device. | 193 # Sort the devices so that we'll try to always run a step in the same device. |
| 177 devices = sorted(android_commands.GetAttachedDevices()) | 194 devices = sorted(android_commands.GetAttachedDevices()) |
| 178 if not devices: | 195 if not devices: |
| 179 print 'You must attach a device' | 196 print 'You must attach a device' |
| 180 return 1 | 197 return 1 |
| 181 | 198 |
| 182 with file(options.steps, 'r') as f: | 199 with file(options.steps, 'r') as f: |
| 183 steps = json.load(f) | 200 steps = json.load(f) |
| 184 return _RunShardedSteps(steps, devices) | 201 flaky_steps = [] |
| 202 if options.flaky_steps: | |
| 203 with file(options.flaky_steps, 'r') as f: | |
| 204 flaky_steps = json.load(f) | |
| 205 return _RunShardedSteps(steps, flaky_steps, devices) | |
| 185 | 206 |
| 186 | 207 |
| 187 if __name__ == '__main__': | 208 if __name__ == '__main__': |
| 188 sys.exit(main(sys.argv)) | 209 sys.exit(main(sys.argv)) |
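Putting the two phases together: a bot cycle would first invoke the script with `-s steps.json --flaky_steps flaky_steps.json` (file names as in the hypothetical example near the top) to fan the steps out across all attached devices and pickle each result, and then invoke it once per step with `-p step_name_foo` so the stored output and exit code are replayed for the buildbot's graph-data processing.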