Chromium Code Reviews| Index: tools/flakiness/is_flaky_test.py |
| diff --git a/tools/flakiness/is_flaky_test.py b/tools/flakiness/is_flaky_test.py |
| new file mode 100755 |
| index 0000000000000000000000000000000000000000..3b7039bf862df6109c646580ec618b94bcc16faa |
| --- /dev/null |
| +++ b/tools/flakiness/is_flaky_test.py |
| @@ -0,0 +1,72 @@ |
| +#!/usr/bin/env python |
| +# Copyright 2014 The Chromium Authors. All rights reserved. |
| +# Use of this source code is governed by a BSD-style license that can be |
| +# found in the LICENSE file. |
| + |
| +"""Runs a test repeatedly to measure its flakiness. The return code is non-zero |
| +if the failure rate is higher than the specified threshold, but is not 100%.""" |
| + |
import argparse
import os
import subprocess
import sys
import time
| + |
def load_options():
  """Builds the command-line interface and parses sys.argv.

  Returns:
    An argparse.Namespace with attributes: retries (int), threshold (float),
    jobs (int) and command (list of str).
  """
  arg_parser = argparse.ArgumentParser(description=__doc__)
  arg_parser.add_argument(
      '--retries', default=1000, type=int,
      help='Number of test retries to measure flakiness.')
  arg_parser.add_argument(
      '--threshold', default=0.05, type=float,
      help='Minimum flakiness level at which test is considered flaky.')
  arg_parser.add_argument(
      '--jobs', '-j', type=int, default=1,
      help='Number of parallel jobs to run tests.')
  arg_parser.add_argument('command', nargs='+', help='Command to run test.')
  return arg_parser.parse_args()
| + |
| + |
def process_finished(running, num_passed, num_failed):
  """Reaps finished processes from |running| and tallies their results.

  Polls every process exactly once, removes the finished ones from |running|
  in place, and updates the pass/fail counters from their return codes.

  Args:
    running: list of subprocess.Popen objects; mutated in place so that only
        still-running processes remain.
    num_passed: number of successful runs counted so far.
    num_failed: number of failed runs counted so far.

  Returns:
    A (num_passed, num_failed) tuple with the updated totals.
  """
  finished = []
  still_running = []
  # Poll each process exactly once. The previous version polled twice (once
  # to collect finished processes, once to rebuild |running|); a process
  # finishing between the two polls would be dropped from both lists and its
  # result silently lost.
  for process in running:
    if process.poll() is None:
      still_running.append(process)
    else:
      finished.append(process)
  running[:] = still_running
  num_passed += sum(1 for p in finished if p.returncode == 0)
  num_failed += sum(1 for p in finished if p.returncode != 0)
  print('%d processes finished. Total passed: %d. Total failed: %d' % (
      len(finished), num_passed, num_failed))
  return num_passed, num_failed
| + |
| + |
def main():
  """Runs the test repeatedly and measures its flakiness.

  Returns:
    1 if the observed failure rate is above --threshold but below 100%,
    0 otherwise (0% failures means stable; 100% failures means the test is
    consistently broken, which is a different problem from flakiness).
  """
  options = load_options()
  num_passed = num_failed = 0
  running = []

  # Discard test output. The previous stdout=subprocess.PIPE was never read,
  # so a sufficiently chatty test would fill the OS pipe buffer, block on
  # write, and deadlock against process.wait() below.
  with open(os.devnull, 'w') as devnull:
    # Start all retries, while limiting total number of running processes.
    for attempt in range(options.retries):
      print('Starting retry %d out of %d\n' % (attempt + 1, options.retries))
      running.append(subprocess.Popen(options.command, stdout=devnull,
                                      stderr=subprocess.STDOUT))
      while len(running) >= options.jobs:
        print('Waiting for previous retries to finish before starting '
              'new ones...')
        time.sleep(0.1)
        num_passed, num_failed = process_finished(
            running, num_passed, num_failed)

    # Wait for the remaining retries to finish.
    print('Waiting for the remaining retries to finish...')
    for process in running:
      process.wait()

    num_passed, num_failed = process_finished(running, num_passed, num_failed)

  # All-pass and all-fail both count as zero flakiness: a test that always
  # fails is broken rather than flaky, per the module docstring.
  if num_passed == 0 or num_failed == 0:
    flakiness = 0
  else:
    flakiness = num_failed / float(options.retries)

  print('Flakiness is %.2f' % flakiness)
  return 1 if flakiness > options.threshold else 0
| + |
| + |
# Script entry point: exit status is main()'s verdict (1 = flaky).
if __name__ == '__main__':
  sys.exit(main())