Chromium Code Reviews

Unified Diff: bin/compare

Issue 1416833004: Make bin/c and bin/compare work on Windows. (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Created 5 years, 1 month ago
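What the patch does, as read from the diff below: the unconditional numpy and scipy imports are dropped, scipy.stats is imported inside a try/except, and when it is unavailable the script falls back to a plain-Python mean and leaves the p-value and standard errors at 0. That way the script still runs on a stock Python install (the usual situation on Windows), just without the Mann-Whitney significance filtering.

Both the old and new versions parse nanobench output the same way: only lines beginning with "Samples:" are used, the last token is taken as the benchmark label, and the tokens in between are timing samples in nanoseconds. A hypothetical input line (values and label invented for illustration):

    Samples: 2.01e+06 1.98e+06 2.05e+06 my_benchmark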
--- a/bin/compare
+++ b/bin/compare
 #!/usr/bin/env python
 
 import argparse
-import numpy
 import sys
-from scipy.stats import mannwhitneyu
-from scipy.stats import sem
+
+have_scipy = True
+try:
+    import scipy.stats
+except:
+    have_scipy = False
 
 SIGNIFICANCE_THRESHOLD = 0.0001
 
 parser = argparse.ArgumentParser(
     formatter_class=argparse.RawDescriptionHelpFormatter,
     description='Compare performance of two runs from nanobench.')
 parser.add_argument('--use_means', action='store_true', default=False,
                     help='Use means to calculate performance ratios.')
 parser.add_argument('baseline', help='Baseline file.')
 parser.add_argument('experiment', help='Experiment file.')
 args = parser.parse_args()
 
 a,b = {},{}
 for (path, d) in [(args.baseline, a), (args.experiment, b)]:
     for line in open(path):
         try:
             tokens = line.split()
             if tokens[0] != "Samples:":
                 continue
             samples = tokens[1:-1]
             label = tokens[-1]
             d[label] = map(float, samples)
         except:
             pass
 
 common = set(a.keys()).intersection(b.keys())
 
+def mean(xs):
+    return sum(xs) / len(xs)
+
 ps = []
 for key in common:
-    _, p = mannwhitneyu(a[key], b[key])  # Non-parametric t-test. Doesn't assume normal dist.
-    if args.use_means:
-        am, bm = numpy.mean(a[key]), numpy.mean(b[key])
-        asem, bsem = sem(a[key]), sem(b[key])
-    else:
-        am, bm = min(a[key]), min(b[key])
-        asem, bsem = 0, 0
+    p, asem, bsem = 0, 0, 0
+    m = mean if args.use_means else min
+    am, bm = m(a[key]), m(b[key])
+    if have_scipy:
+        # Non-parametric t-test. Doesn't assume normal dist.
+        _, p = scipy.stats.mannwhitneyu(a[key], b[key])
+        asem, bsem = scipy.stats.sem(a[key]), scipy.stats.sem(b[key])
     ps.append((bm/am, p, key, am, bm, asem, bsem))
 ps.sort(reverse=True)
 
 def humanize(ns):
     for threshold, suffix in [(1e9, 's'), (1e6, 'ms'), (1e3, 'us'), (1e0, 'ns')]:
         if ns > threshold:
             return "%.3g%s" % (ns/threshold, suffix)
 
 maxlen = max(map(len, common))
 
 # We print only significant changes in benchmark timing distribution.
 bonferroni = SIGNIFICANCE_THRESHOLD / len(ps)  # Adjust for the fact we've run multiple tests.
 for ratio, p, key, am, bm, asem, bsem in ps:
     if p < bonferroni:
         str_ratio = ('%.2gx' if ratio < 1 else '%.3gx') % ratio
         if args.use_means:
             print '%*s\t%6s(%6s) -> %6s(%6s)\t%s' % (maxlen, key, humanize(am), humanize(asem),
                                                      humanize(bm), humanize(bsem), str_ratio)
         else:
             print '%*s\t%6s -> %6s\t%s' % (maxlen, key, humanize(am), humanize(bm), str_ratio)
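For reference, a hedged usage sketch (file names hypothetical; each file is captured nanobench output containing "Samples:" lines):

    python bin/compare baseline.txt experiment.txt
    python bin/compare --use_means baseline.txt experiment.txt

The first form compares per-benchmark minimum times; the second compares means and, when scipy is available, also prints the standard error of each mean. As a worked example of the filter, with (say) 200 benchmarks in common the Bonferroni-adjusted threshold is 0.0001 / 200 = 5e-7, and only benchmarks whose Mann-Whitney p-value falls below that are printed.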