#!/usr/bin/python

# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


"""Generate new bench expectations from results of trybots on a code review."""
epoger, 2014/05/27 15:29:56:
  Is this tool intended to be run by humans? If so: …

borenet, 2014/05/27 19:36:14:
  For now, the primary user will be the RecreateSKPs …

benchen, 2014/05/27 20:51:00:
  Yes, that'll be cool.
  On 2014/05/27 19:36:14, borenet wrote: …
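A minimal sketch of the non-human path the thread describes: driving the tool from another script instead of the command line, assuming the module is importable (the module name and review URL below are hypothetical):

    # Hypothetical module name for this file; the review URL is made up too.
    import gen_bench_expectations_from_codereview as gen_bench

    gen_bench.gen_bench_expectations_from_codereview(
        'https://codereview.chromium.org/123456789')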


import collections
import compare_codereview
import os
import re
import subprocess
import sys


BENCH_DATA_URL = 'gs://chromium-skia-gm/perfdata/%s/%s/*'


def find_all_builds(codereview_url):
  """Finds and returns information about trybot runs for a code review.

  Args:
    codereview_url: URL of the codereview in question.

  Returns:
    List of NamedTuples: (builder_name, build_number, is_finished)
  """
  results = compare_codereview.CodeReviewHTMLParser().parse(codereview_url)
  TryBuild = collections.namedtuple(
      'TryBuild', ['builder_name', 'build_number', 'is_finished'])
  try_builds = []

  for builder, data in results.iteritems():
    if builder.startswith('Perf'):
      try_builds.append(TryBuild(builder, data.url.split('/')[-1],
                                 data.status != 'pending'))
  return try_builds
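For reference, find_all_builds would yield a list shaped like the following, with hypothetical builder names and build numbers (one finished trybot, one still pending):

    [TryBuild(builder_name='Perf-Ubuntu12-ShuttleA-GTX660-x86-Release-Trybot',
              build_number='1234', is_finished=True),
     TryBuild(builder_name='Perf-Win7-ShuttleA-HD2000-x86-Release-Trybot',
              build_number='567', is_finished=False)]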


def get_bench_data(builder, build_num, dest_dir):
  """Download the bench data for the given builder at the given build_num.

  Args:
    builder: string; name of the builder.
    build_num: string; build number.
    dest_dir: string; destination directory for the bench data.
  """
  url = BENCH_DATA_URL % (builder, build_num)
  subprocess.check_call(['gsutil', 'cp', '-R', url, dest_dir],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)


def find_revision_from_downloaded_data(dest_dir):
  """Finds the revision at which the downloaded data was generated.

  Args:
    dest_dir: string; directory holding the downloaded data.

  Returns:
    The revision (git commit hash) at which the downloaded data was
    generated, or None if no revision can be found.
  """
  for data_file in os.listdir(dest_dir):
    match = re.match('bench_(?P<revision>[0-9a-fA-F]{2,40})_data*', data_file)
benchen, 2014/05/23 22:51:02:
  The files that end with _data are microbenches only …

borenet, 2014/05/27 19:36:14:
  It doesn't really matter for this use case; all of …

benchen, 2014/05/27 20:51:00:
  That's right, thanks.
  On 2014/05/27 19:36:14, borenet wrote: …
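A quick illustration of the pattern under discussion, using hypothetical file names. Both the microbench flavor and the other flavors carry the same revision, which (per borenet) is all this use case needs:

    import re

    PATTERN = 'bench_(?P<revision>[0-9a-fA-F]{2,40})_data*'

    # Hypothetical file names; 'bench_1a2b3c4d_data' is the microbench flavor.
    for name in ('bench_1a2b3c4d_data', 'bench_1a2b3c4d_data_skp'):
      match = re.match(PATTERN, name)
      if match:
        print match.group('revision')  # prints '1a2b3c4d' both times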
    if match:
      return match.group('revision')
  return None


def gen_bench_expectations_from_codereview(codereview_url,
                                           error_on_unfinished=True):
  """Generate bench expectations from a code review.

  Scans the given code review for Perf trybot runs. Downloads the results of
  finished trybots and uses them to generate new expectations for their
  waterfall counterparts.

  Args:
    codereview_url: string; URL of the code review.
    error_on_unfinished: bool; throw an error if any trybot has not finished.
  """
  try_builds = find_all_builds(codereview_url)

  # Verify that all trybots have finished running.
  if error_on_unfinished:
    for try_build in try_builds:
      if not try_build.is_finished:
        raise Exception('%s: #%s is not finished.' % (try_build.builder_name,
                                                      try_build.build_number))
borenet, 2014/05/23 22:24:47:
  Maybe the error_on_unfinished should be pulled out …

epoger, 2014/05/27 15:29:56:
  I think either one is fine...
  Maybe we should throw …

borenet, 2014/05/27 19:36:14:
  Done.
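One plausible shape for borenet's "Done" here, assuming the fix was to collect every unfinished trybot and raise a single error naming all of them (a sketch, not the CL's actual follow-up; try_builds and error_on_unfinished come from the surrounding function):

    # Sketch only: report all unfinished trybots at once instead of
    # raising on the first one encountered.
    if error_on_unfinished:
      unfinished = [t for t in try_builds if not t.is_finished]
      if unfinished:
        raise Exception('Not all trybots have finished:\n%s' % '\n'.join(
            '%s: #%s' % (t.builder_name, t.build_number) for t in unfinished))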
  failed_data_pull = []
  failed_gen_expectations = []

  for try_build in try_builds:
    builder = try_build.builder_name

    # Download the data.
    dest_dir = os.path.join('.bench_data', builder)
epoger, 2014/05/27 15:29:56:
  please use a constant to hold '.bench_data'

borenet, 2014/05/27 19:36:14:
  Done.
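The requested constant would presumably sit at module scope next to BENCH_DATA_URL; a sketch, with the constant name a guess:

    import os

    # Hypothetical constant name; would replace the string literal wherever
    # the bench data directory is referenced.
    BENCH_DATA_DIR = '.bench_data'

    builder = 'Perf-Example-Trybot'  # placeholder builder name
    dest_dir = os.path.join(BENCH_DATA_DIR, builder)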
    if not os.path.isdir(dest_dir):
      os.makedirs(dest_dir)
    try:
      get_bench_data(builder, try_build.build_number, dest_dir)
    except subprocess.CalledProcessError:
      failed_data_pull.append(builder)
      continue

    # Find the revision at which the data was generated.
    revision = find_revision_from_downloaded_data(dest_dir)
    if not revision:
      # If we can't find a revision, then something is wrong with the data we
      # downloaded. Skip this builder.
      failed_data_pull.append(builder)
      continue

    # Generate new expectations.
    output_file = os.path.join('expectations', 'bench',
epoger, 2014/05/27 15:29:56:
  This makes assumptions about what directory this script …

borenet, 2014/05/27 19:36:14:
  Done.
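A minimal sketch of removing the CWD assumption by anchoring the expectations path to the script's own location, assuming this file lives one level below the Skia checkout root (the layout and names below are guesses, not confirmed by the CL):

    import os

    # Resolve paths relative to this script instead of the CWD.
    SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
    SKIA_ROOT = os.path.dirname(SCRIPT_DIR)  # assumes script sits in a subdir

    builder = 'Perf-Example-Trybot'  # placeholder builder name
    output_file = os.path.join(SKIA_ROOT, 'expectations', 'bench',
                               'bench_expectations_%s.txt' % builder)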
                               'bench_expectations_%s.txt' %
                               builder.rstrip('-Trybot'))
    try:
      subprocess.check_call(['python', 'bench/gen_bench_expectations.py',
epoger, 2014/05/27 15:29:56:
  Same as above (about CWD), but also / vs os.path.join …

borenet, 2014/05/27 19:36:14:
  Done.
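And the companion change for the subprocess call, building the helper script's path with os.path.join under the same checkout-layout assumption as the previous sketch:

    import os
    import subprocess

    # Same layout assumption as above: this script lives one level below
    # the Skia checkout root (a guess, not confirmed by the CL).
    SKIA_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    GEN_SCRIPT = os.path.join(SKIA_ROOT, 'bench', 'gen_bench_expectations.py')

    def run_gen_expectations(builder, output_file, dest_dir, revision):
      # Hypothetical helper wrapping the call from the code under review.
      subprocess.check_call(['python', GEN_SCRIPT,
                             '-b', builder, '-o', output_file,
                             '-d', dest_dir, '-r', revision])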
                             '-b', builder, '-o', output_file,
                             '-d', dest_dir, '-r', revision])
    except subprocess.CalledProcessError:
      failed_gen_expectations.append(builder)

  failure = ''
  if failed_data_pull:
    failure += 'Failed to load data for: %s\n\n' % ','.join(failed_data_pull)
  if failed_gen_expectations:
    failure += 'Failed to generate expectations for: %s\n\n' % ','.join(
        failed_gen_expectations)
  if failure:
    raise Exception(failure)


if __name__ == '__main__':
  gen_bench_expectations_from_codereview(sys.argv[1])
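As written, the __main__ block passes only sys.argv[1], so error_on_unfinished is always True when the tool is run by hand. If a flag were ever wanted, a hedged argparse front-end might look like this (the flag name is made up):

    import argparse

    if __name__ == '__main__':
      parser = argparse.ArgumentParser(
          description='Generate bench expectations from a code review.')
      parser.add_argument('codereview_url', help='URL of the code review.')
      parser.add_argument('--ignore-unfinished', action='store_true',
                          help='Do not error out on unfinished trybots.')
      args = parser.parse_args()
      gen_bench_expectations_from_codereview(
          args.codereview_url,
          error_on_unfinished=not args.ignore_unfinished)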