Index: tools/flakiness/is_flaky.py
diff --git a/tools/flakiness/is_flaky_test.py b/tools/flakiness/is_flaky.py
similarity index 51%
copy from tools/flakiness/is_flaky_test.py
copy to tools/flakiness/is_flaky.py
index 3b7039bf862df6109c646580ec618b94bcc16faa..e90104f8110665fbb5738335b236a7a5b15dfa8a 100755
--- a/tools/flakiness/is_flaky_test.py
+++ b/tools/flakiness/is_flaky.py
@@ -7,6 +7,7 @@
 if the failure rate is higher than the specified threshold, but is not 100%."""
 
 import argparse
+import multiprocessing.dummy
 import subprocess
 import sys
 import time
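For context on the new import: multiprocessing.dummy exposes the same Pool API as multiprocessing, but backs it with threads rather than processes, which is enough here because each worker only blocks on a child test process. A minimal sketch of the pattern the patch relies on (Python 2 to match the tool; the commands are placeholders, not part of the change):

import multiprocessing.dummy
import subprocess

def run(cmd):
  # The worker thread blocks in subprocess.call and releases the GIL while
  # waiting, so a thread pool is sufficient for process-level parallelism.
  return subprocess.call(cmd)

if __name__ == '__main__':
  pool = multiprocessing.dummy.Pool(processes=2)  # at most two at a time
  results = pool.map(run, [['true'], ['false'], ['true']])
  print results  # exit codes in submission order, e.g. [0, 1, 0]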
@@ -23,43 +24,28 @@ def load_options():
   parser.add_argument('command', nargs='+', help='Command to run test.')
   return parser.parse_args()
-
-def process_finished(running, num_passed, num_failed):
-  finished = [p for p in running if p.poll() is not None]
-  running[:] = [p for p in running if p.poll() is None]
-  num_passed += len([p for p in finished if p.returncode == 0])
-  num_failed += len([p for p in finished if p.returncode != 0])
-  print '%d processed finished. Total passed: %d. Total failed: %d' % (
-      len(finished), num_passed, num_failed)
-  return num_passed, num_failed
-
+def run_test(job):
+  print 'Starting retry attempt %d out of %d' % (job['index'] + 1,
+                                                 job['retries'])
+  return subprocess.call(job['cmd'], stdout=subprocess.PIPE,
+                         stderr=subprocess.STDOUT)
 def main():
   options = load_options()
   num_passed = num_failed = 0
   running = []
-  # Start all retries, while limiting total number of running processes.
-  for attempt in range(options.retries):
-    print 'Starting retry %d out of %d\n' % (attempt + 1, options.retries)
-    running.append(subprocess.Popen(options.command, stdout=subprocess.PIPE,
-                                    stderr=subprocess.STDOUT))
-    while len(running) >= options.jobs:
-      print 'Waiting for previous retries to finish before starting new ones...'
-      time.sleep(0.1)
-      num_passed, num_failed = process_finished(running, num_passed, num_failed)
-
-
-  # Wait for the remaining retries to finish.
-  print 'Waiting for the remaining retries to finish...'
-  for process in running:
-    process.wait()
+  pool = multiprocessing.dummy.Pool(processes=options.jobs)
+  args = [{'index': index, 'retries': options.retries, 'cmd': options.command}
+          for index in range(options.retries)]
+  results = pool.map(run_test, args)
+  num_passed = len([retcode for retcode in results if retcode == 0])
+  num_failed = len(results) - num_passed
-  num_passed, num_failed = process_finished(running, num_passed, num_failed)
   if num_passed == 0 or num_failed == 0:
    qyearsley (2014/09/16 18:52:09):
    You could also omit `or num_failed == 0`, since it
    Sergiy Byelozyorov (2014/09/17 15:49:33):
    Done.
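The thread above is cut off in this capture; the likely point is that the `or num_failed == 0` half of the check is redundant, because the formula below already evaluates to 0.0 when nothing failed, so only the all-runs-failed case needs special handling. A small illustration using the counting logic from this patch:

# Illustration only: with zero failures the division is already 0.0.
results = [0, 0, 0, 0]  # four passing runs
num_passed = len([retcode for retcode in results if retcode == 0])
num_failed = len(results) - num_passed
print num_failed / float(len(results))  # prints 0.0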
     flakiness = 0
   else:
-    flakiness = num_failed / float(options.retries)
+    flakiness = num_failed / float(len(results))
   print 'Flakiness is %.2f' % flakiness
   if flakiness > options.threshold:
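A behavioural note on the refactor as a whole: the old polling loop capped the number of concurrent test runs at options.jobs, and Pool(processes=options.jobs) preserves that cap, since the pool never runs more workers than it has threads and map() blocks until every retry has finished. A self-contained sketch (illustration only, not part of the patch) that makes the bound observable:

import multiprocessing.dummy
import threading
import time

active = [0]  # number of jobs currently running
peak = [0]    # highest concurrency observed
lock = threading.Lock()

def job(_):
  with lock:
    active[0] += 1
    peak[0] = max(peak[0], active[0])
  time.sleep(0.05)  # stand-in for waiting on a test process
  with lock:
    active[0] -= 1
  return 0

if __name__ == '__main__':
  pool = multiprocessing.dummy.Pool(processes=3)
  pool.map(job, range(20))
  print 'peak concurrency:', peak[0]  # never exceeds 3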