Chromium Code Reviews

Side by Side Diff: third_party/google_benchmark/tools/gbench/report.py

Issue 2865663003: Adding Google benchmarking library. (Closed)
Patch Set: Sketch. Created 3 years, 7 months ago
1 """report.py - Utilities for reporting statistics about benchmark results
2 """
3 import os
4
5 class BenchmarkColor(object):
6 def __init__(self, name, code):
7 self.name = name
8 self.code = code
9
10 def __repr__(self):
11 return '%s%r' % (self.__class__.__name__,
12 (self.name, self.code))
13
14 def __format__(self, format):
15 return self.code
16
17 # Benchmark Colors Enumeration
18 BC_NONE = BenchmarkColor('NONE', '')
19 BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m')
20 BC_CYAN = BenchmarkColor('CYAN', '\033[96m')
21 BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m')
22 BC_HEADER = BenchmarkColor('HEADER', '\033[92m')
23 BC_WARNING = BenchmarkColor('WARNING', '\033[93m')
24 BC_WHITE = BenchmarkColor('WHITE', '\033[97m')
25 BC_FAIL = BenchmarkColor('FAIL', '\033[91m')
26 BC_ENDC = BenchmarkColor('ENDC', '\033[0m')
27 BC_BOLD = BenchmarkColor('BOLD', '\033[1m')
28 BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m')
29
30 def color_format(use_color, fmt_str, *args, **kwargs):
31 """
32 Return the result of 'fmt_str.format(*args, **kwargs)' after transforming
33 'args' and 'kwargs' according to the value of 'use_color'. If 'use_color'
34 is False then all color codes in 'args' and 'kwargs' are replaced with
35 the empty string.
36 """
37 assert use_color is True or use_color is False
38 if not use_color:
39 args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE
40 for arg in args]
41 kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE
42 for key, arg in kwargs.items()}
43 return fmt_str.format(*args, **kwargs)
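A quick illustration (not part of the file under review) of how these helpers compose: BenchmarkColor.__format__ returns the raw escape code, so the color objects can be dropped straight into a format string, and with use_color=False they are swapped for BC_NONE and render as plain text. The format string here is just an example.

    line = color_format(True, "{}{}{endc}", BC_FAIL, "regression", endc=BC_ENDC)
    # -> '\x1b[91mregression\x1b[0m'
    plain = color_format(False, "{}{}{endc}", BC_FAIL, "regression", endc=BC_ENDC)
    # -> 'regression'  (color arguments collapse to the empty string)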
44
45
46 def find_longest_name(benchmark_list):
47 """
48 Return the length of the longest benchmark name in a given list of
49 benchmark JSON objects
50 """
51 longest_name = 1
52 for bc in benchmark_list:
53 if len(bc['name']) > longest_name:
54 longest_name = len(bc['name'])
55 return longest_name
56
57
58 def calculate_change(old_val, new_val):
59 """
60 Return a float representing the decimal change between old_val and new_val.
61 """
62 if old_val == 0 and new_val == 0:
63 return 0.0
64 if old_val == 0:
65 return float(new_val - old_val) / (float(old_val + new_val) / 2)
66 return float(new_val - old_val) / abs(old_val)
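For reference, a few worked values (these match the unit-test expectations further down): calculate_change returns the signed fractional change relative to the old value.

    calculate_change(100, 110)    # +0.10  (10% slower)
    calculate_change(50, 25)      # -0.50  (2x faster)
    calculate_change(100, 10000)  # +99.00 (100x slower)
    calculate_change(0, 0)        #  0.0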
67
68
69 def generate_difference_report(json1, json2, use_color=True):
70 """
71 Calculate and report the difference between each test of two benchmark
72 runs specified as 'json1' and 'json2'.
73 """
74 first_col_width = find_longest_name(json1['benchmarks']) + 5
75 def find_test(name):
76 for b in json2['benchmarks']:
77 if b['name'] == name:
78 return b
79 return None
80 first_line = "{:<{}s} Time CPU Old New".format(
81 'Benchmark', first_col_width)
82 output_strs = [first_line, '-' * len(first_line)]
83
84 gen = (bn for bn in json1['benchmarks'] if 'real_time' in bn and 'cpu_time' in bn)
85 for bn in gen:
86 other_bench = find_test(bn['name'])
87 if not other_bench:
88 continue
89
90 def get_color(res):
91 if res > 0.05:
92 return BC_FAIL
93 elif res > -0.07:
94 return BC_WHITE
95 else:
96 return BC_CYAN
97 fmt_str = "{}{:<{}s}{endc}{}{:+9.2f}{endc}{}{:+14.2f}{endc}{:14d}{:14d}"
98 tres = calculate_change(bn['real_time'], other_bench['real_time'])
99 cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
100 output_strs += [color_format(use_color, fmt_str,
101 BC_HEADER, bn['name'], first_col_width,
102 get_color(tres), tres, get_color(cpures), cpures,
103 bn['cpu_time'], other_bench['cpu_time'],
104 endc=BC_ENDC)]
105 return output_strs
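A sketch of how a caller might drive this report; the file names below are placeholders, and the JSON inputs are assumed to come from running a benchmark binary with --benchmark_out=<file> --benchmark_out_format=json.

    import json

    with open('run_old.json') as f:   # placeholder path
        json1 = json.load(f)
    with open('run_new.json') as f:   # placeholder path
        json2 = json.load(f)
    for line in generate_difference_report(json1, json2, use_color=True):
        print(line)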
106
107 ###############################################################################
108 # Unit tests
109
110 import unittest
111
112 class TestReportDifference(unittest.TestCase):
113 def load_results(self):
114 import json
115 testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs')
116 testOutput1 = os.path.join(testInputs, 'test1_run1.json')
117 testOutput2 = os.path.join(testInputs, 'test1_run2.json')
118 with open(testOutput1, 'r') as f:
119 json1 = json.load(f)
120 with open(testOutput2, 'r') as f:
121 json2 = json.load(f)
122 return json1, json2
123
124 def test_basic(self):
125 expect_lines = [
126 ['BM_SameTimes', '+0.00', '+0.00', '10', '10'],
127 ['BM_2xFaster', '-0.50', '-0.50', '50', '25'],
128 ['BM_2xSlower', '+1.00', '+1.00', '50', '100'],
129 ['BM_10PercentFaster', '-0.10', '-0.10', '100', '90'],
130 ['BM_10PercentSlower', '+0.10', '+0.10', '100', '110'],
131 ['BM_100xSlower', '+99.00', '+99.00', '100', '10000'],
132 ['BM_100xFaster', '-0.99', '-0.99', '10000', '100'],
133 ]
134 json1, json2 = self.load_results()
135 output_lines_with_header = generate_difference_report(json1, json2, use_color=False)
136 output_lines = output_lines_with_header[2:]
137 print("\n".join(output_lines_with_header))
138 self.assertEqual(len(output_lines), len(expect_lines))
139 for i in range(len(output_lines)):
140 parts = [x for x in output_lines[i].split(' ') if x]
141 self.assertEqual(len(parts), 5)
142 self.assertEqual(parts, expect_lines[i])
143
144
145 if __name__ == '__main__':
146 unittest.main()
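When run directly (python report.py from this directory), unittest.main() picks up TestReportDifference; load_results() expects the fixtures test1_run1.json and test1_run2.json in an adjacent Inputs/ directory, as referenced above.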