Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright (C) 2011 Google Inc. All rights reserved. | 1 # Copyright (C) 2011 Google Inc. All rights reserved. |
| 2 # | 2 # |
| 3 # Redistribution and use in source and binary forms, with or without | 3 # Redistribution and use in source and binary forms, with or without |
| 4 # modification, are permitted provided that the following conditions are | 4 # modification, are permitted provided that the following conditions are |
| 5 # met: | 5 # met: |
| 6 # | 6 # |
| 7 # * Redistributions of source code must retain the above copyright | 7 # * Redistributions of source code must retain the above copyright |
| 8 # notice, this list of conditions and the following disclaimer. | 8 # notice, this list of conditions and the following disclaimer. |
| 9 # * Redistributions in binary form must reproduce the above | 9 # * Redistributions in binary form must reproduce the above |
| 10 # copyright notice, this list of conditions and the following disclaimer | 10 # copyright notice, this list of conditions and the following disclaimer |
| (...skipping 102 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 113 start_time = time.time() | 113 start_time = time.time() |
| 114 try: | 114 try: |
| 115 with message_pool.get(self, self._worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host) as pool: | 115 with message_pool.get(self, self._worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host) as pool: |
| 116 pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards) | 116 pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards) |
| 117 except TestRunInterruptedException, e: | 117 except TestRunInterruptedException, e: |
| 118 _log.warning(e.reason) | 118 _log.warning(e.reason) |
| 119 run_results.interrupted = True | 119 run_results.interrupted = True |
| 120 except KeyboardInterrupt: | 120 except KeyboardInterrupt: |
| 121 self._printer.flush() | 121 self._printer.flush() |
| 122 self._printer.writeln('Interrupted, exiting ...') | 122 self._printer.writeln('Interrupted, exiting ...') |
| 123 raise | 123 raise |
|
Dirk Pranke
2013/08/27 18:18:25
If your intent is to create and show the results.j
| |
| 124 except Exception, e: | 124 except Exception, e: |
| 125 _log.debug('%s("%s") raised, exiting' % (e.__class__.__name__, str(e))) | 125 _log.debug('%s("%s") raised, exiting' % (e.__class__.__name__, str(e))) |
| 126 raise | 126 raise |
| 127 finally: | 127 finally: |
| 128 run_results.run_time = time.time() - start_time | 128 run_results.run_time = time.time() - start_time |
| 129 # Mark execution got interrupted. | |
| 130 run_results.interrupted = True | |
| 131 # Need to return run_results even in this case. | |
| 132 return run_results | |
| 129 | 133 |
| 130 return run_results | 134 return run_results |
| 131 | 135 |
| 132 def _worker_factory(self, worker_connection): | 136 def _worker_factory(self, worker_connection): |
| 133 results_directory = self._results_directory | 137 results_directory = self._results_directory |
| 134 if self._retrying: | 138 if self._retrying: |
| 135 self._filesystem.maybe_make_directory(self._filesystem.join(self._results_directory, 'retries')) | 139 self._filesystem.maybe_make_directory(self._filesystem.join(self._results_directory, 'retries')) |
| 136 results_directory = self._filesystem.join(self._results_directory, 'retries') | 140 results_directory = self._filesystem.join(self._results_directory, 'retries') |
| 137 return Worker(worker_connection, results_directory, self._options) | 141 return Worker(worker_connection, results_directory, self._options) |
| 138 | 142 |
| (...skipping 387 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 526 def split_at(seq, index): | 530 def split_at(seq, index): |
| 527 return (seq[:index], seq[index:]) | 531 return (seq[:index], seq[index:]) |
| 528 | 532 |
| 529 num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards) | 533 num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards) |
| 530 new_shards = [] | 534 new_shards = [] |
| 531 remaining_shards = old_shards | 535 remaining_shards = old_shards |
| 532 while remaining_shards: | 536 while remaining_shards: |
| 533 some_shards, remaining_shards = split_at(remaining_shards, num_old_per_new) | 537 some_shards, remaining_shards = split_at(remaining_shards, num_old_per_new) |
| 534 new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_shards) + 1), extract_and_flatten(some_shards))) | 538 new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_shards) + 1), extract_and_flatten(some_shards))) |
| 535 return new_shards | 539 return new_shards |
| OLD | NEW |