OLD | NEW |
(Empty) | |
| 1 #!/usr/bin/env python |
| 2 """ |
| 3 compare_bench.py - Compare two benchmarks or their results and report the |
| 4 difference. |
| 5 """ |
| 6 import argparse |
| 7 from argparse import ArgumentParser |
| 8 import sys |
| 9 import gbench |
| 10 from gbench import util, report |
| 11 from gbench.util import * |
| 12 |
| 13 def check_inputs(in1, in2, flags): |
| 14 """ |
| 15 Perform checks on the user-provided inputs and diagnose any abnormalities
| 16 """ |
| 17 in1_kind, in1_err = classify_input_file(in1) |
| 18 in2_kind, in2_err = classify_input_file(in2) |
| 19 output_file = find_benchmark_flag('--benchmark_out=', flags) |
| 20 output_type = find_benchmark_flag('--benchmark_out_format=', flags) |
| 21 if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file: |
| 22 print(("WARNING: '--benchmark_out=%s' will be passed to both " |
| 23 "benchmarks causing it to be overwritten") % output_file) |
| 24 if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0: |
| 25 print("WARNING: passing --benchmark flags has no effect since both " |
| 26 "inputs are JSON") |
| 27 if output_type is not None and output_type != 'json': |
| 28 print(("ERROR: passing '--benchmark_out_format=%s' to 'compare_bench.py`
" |
| 29 " is not supported.") % output_type) |
| 30 sys.exit(1) |
| 31 |
| 32 |
| 33 def main(): |
| 34 parser = ArgumentParser( |
| 35 description='compare the results of two benchmarks') |
| 36 parser.add_argument( |
| 37 'test1', metavar='test1', type=str, nargs=1, |
| 38 help='A benchmark executable or JSON output file') |
| 39 parser.add_argument( |
| 40 'test2', metavar='test2', type=str, nargs=1, |
| 41 help='A benchmark executable or JSON output file') |
| 42 # FIXME this is a dummy argument which will never actually match |
| 43 # any --benchmark flags but it helps generate a better usage message |
| 44 parser.add_argument( |
| 45 'benchmark_options', metavar='benchmark_option', nargs='*', |
| 46 help='Arguments to pass when running benchmark executables' |
| 47 ) |
| 48 args, unknown_args = parser.parse_known_args() |
| 49 # Parse the command line flags |
| 50 test1 = args.test1[0] |
| 51 test2 = args.test2[0] |
| 52 if args.benchmark_options: |
| 53 print("Unrecognized positional argument arguments: '%s'" |
| 54 % args.benchmark_options) |
| 55 sys.exit(1) |
| 56 benchmark_options = unknown_args |
| 57 check_inputs(test1, test2, benchmark_options) |
| 58 # Run the benchmarks and report the results |
| 59 json1 = gbench.util.run_or_load_benchmark(test1, benchmark_options) |
| 60 json2 = gbench.util.run_or_load_benchmark(test2, benchmark_options) |
| 61 output_lines = gbench.report.generate_difference_report(json1, json2) |
| 62 print('Comparing %s to %s' % (test1, test2)) |
| 63 for ln in output_lines: |
| 64 print(ln) |
| 65 |
| 66 |
| 67 if __name__ == '__main__': |
| 68 main() |
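For context, a typical invocation of the script added above might look like the following; the binary names, JSON file names, and the --benchmark_filter flag are illustrative only, not taken from this patch:

    ./compare_bench.py ./baseline_bench ./contender_bench --benchmark_filter=BM_.*
    ./compare_bench.py baseline.json contender.json

Any flags left over after the two positional arguments are forwarded to the benchmark executables through run_or_load_benchmark(); when both inputs are already JSON files, check_inputs() only warns that such flags have no effect.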