Index: bench/check_bench_regressions.py
diff --git a/bench/check_bench_regressions.py b/bench/check_bench_regressions.py
index b26eb56ae00ea71ed68f6d8bd0d037fd210069cb..b61f5104381b6e71b82b6d2a438b8fbde07044c3 100644
--- a/bench/check_bench_regressions.py
+++ b/bench/check_bench_regressions.py
@@ -134,16 +134,17 @@ def read_expectations(expectations, filename):
if bench_entry in expectations:
raise Exception("Dup entries for bench expectation %s" %
bench_entry)
- # [<Bench_BmpConfig_TimeType>,<Platform-Alg>] -> (LB, UB)
+ # [<Bench_BmpConfig_TimeType>,<Platform-Alg>] -> (LB, UB, EXPECTED)
expectations[bench_entry] = (float(elements[-2]),
- float(elements[-1]))
+ float(elements[-1]),
+ float(elements[-3]))
epoger 2014/02/19 18:51:08: Not new to this CL... but these are strange magic
benchen 2014/02/19 21:08:08: Done using the constant index approach, which is m

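As a side note on the change above: this patch snapshot still uses the negative-index lookups (elements[-3], elements[-2], elements[-1]) that epoger flags as magic numbers; per benchen's reply, a later patch set names them with constants. A minimal sketch of that constant-index idea, with hypothetical constant names and a made-up sample row (not the actual expectations file format or the identifiers that landed):

    # Hypothetical constant names for the trailing fields of an expectations row;
    # the landed patch set may use different identifiers and field order.
    IDX_EXPECTED = -3   # expected bench value
    IDX_LB = -2         # lower bound
    IDX_UB = -1         # upper bound

    def entry_from_elements(elements):
        """Build the (LB, UB, EXPECTED) tuple stored per bench/platform key."""
        return (float(elements[IDX_LB]),
                float(elements[IDX_UB]),
                float(elements[IDX_EXPECTED]))

    # Made-up row whose last three fields are expected, lower bound, upper bound:
    print(entry_from_elements(['bench_foo', 'Platform-Alg', '100.0', '90.0', '110.0']))
    # -> (90.0, 110.0, 100.0)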
def check_expectations(lines, expectations, revision, key_suffix):
"""Check if there are benches in the given revising out of range.
epoger 2014/02/19 18:51:08: TODO: document function arguments, return value, a
benchen 2014/02/19 21:08:08: Done.

""" |
# The platform for this bot, to pass to the dashboard plot. |
platform = key_suffix[ : key_suffix.rfind('-')] |
- exceptions = [] |
+ exceptions = {} |
epoger 2014/02/19 18:51:08: Please document the structure of this dictionary.
benchen 2014/02/19 21:08:08: Done.

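Regarding the dictionary structure epoger asks about: in the hunk below, exceptions maps each bench's off_ratio (measured value divided by expected value) to the list of out-of-range messages sharing that ratio, so the final report can be ordered by severity. A small illustration with invented values:

    # off_ratio -> list of out-of-range messages sharing that ratio.
    # Ratios above 1 are regressions (slower than expected); below 1, faster.
    exceptions = {
        1.35: ['Bench foo_640_480 out of range [90.0, 110.0] (135.0 vs 100.0, 35%).'],
        0.80: ['Bench bar_gpu out of range [45.0, 55.0] (40.0 vs 50.0, -20%).'],
    }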
for line in lines:
line_str = str(line)
line_str = line_str[ : line_str.find('_{')]
@@ -151,14 +152,23 @@ def check_expectations(lines, expectations, revision, key_suffix):
if bench_platform_key not in expectations:
continue
this_bench_value = lines[line]
- this_min, this_max = expectations[bench_platform_key]
+ this_min, this_max, this_expected = expectations[bench_platform_key]
if this_bench_value < this_min or this_bench_value > this_max:
- exception = 'Bench %s value %s out of range [%s, %s].' % (
- bench_platform_key, this_bench_value, this_min, this_max)
- exceptions.append(exception)
+ off_ratio = this_bench_value / this_expected
+ exception = 'Bench %s off range [%s, %s] (%s vs %s, %s%%).' % (
epoger 2014/02/19 18:51:08: off -> out of
benchen 2014/02/19 21:08:08: Done.

+ bench_platform_key, this_min, this_max, this_bench_value,
+ this_expected, (off_ratio - 1) * 100)
+ exceptions.setdefault(off_ratio, []).append(exception)
if exceptions:
- raise Exception('Bench values out of range:\n' +
- '\n'.join(exceptions))
+ ratios = exceptions.keys()
+ ratios.sort(reverse=True)
+ outputs = ['Bench values out of range:', '', 'Slower:']
+ for ratio in ratios:
+ if ratio < 1 and 'Faster:' not in outputs:
epoger 2014/02/19 18:51:08: I think it might be cleaner to build two expectati
benchen 2014/02/19 21:08:08: Done.

+ outputs.extend(['', 'Faster:'])
+ outputs.extend(exceptions[ratio])
+
+ raise Exception('\n'.join(outputs))
def main():
"""Parses command line and checks bench expectations."""