OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Functions for adding results to the Performance Dashboard.""" | 6 """Functions for adding results to perf dashboard.""" |
7 | 7 |
8 import calendar | 8 import calendar |
9 import datetime | 9 import datetime |
10 import httplib | 10 import httplib |
11 import json | 11 import json |
12 import os | 12 import os |
13 import urllib | 13 import urllib |
14 import urllib2 | 14 import urllib2 |
15 | 15 |
16 from slave import slave_utils | 16 from slave import slave_utils |
17 | 17 |
18 # The paths in the results dashboard URLs for sending and viewing results. | 18 # The paths in the results dashboard URLs for sending and viewing results. |
19 SEND_RESULTS_PATH = '/add_point' | 19 SEND_RESULTS_PATH = '/add_point' |
20 RESULTS_LINK_PATH = '/report?masters=%s&bots=%s&tests=%s&rev=%s' | 20 RESULTS_LINK_PATH = '/report?masters=%s&bots=%s&tests=%s&rev=%s' |
21 | |
22 # CACHE_DIR/CACHE_FILENAME will be created in options.build_dir to cache | 21 # CACHE_DIR/CACHE_FILENAME will be created in options.build_dir to cache |
23 # results which need to be retried. | 22 # results which need to be retried. |
24 CACHE_DIR = 'results_dashboard' | 23 CACHE_DIR = 'results_dashboard' |
25 CACHE_FILENAME = 'results_to_retry' | 24 CACHE_FILENAME = 'results_to_retry' |
26 | 25 |
27 | 26 |
28 #TODO(xusydoc): set fail_hard to True when bots stabilize. See crbug.com/222607. | 27 #TODO(xusydoc): set fail_hard to True when bots stabilize. See crbug.com/222607. |
29 def SendResults(logs_dict, perf_id, test, url, mastername, buildername, | 28 def SendResults(logname, lines, system, test_name, url, masterid, |
30 buildnumber, build_dir, supplemental_columns, | 29 buildername, buildnumber, build_dir, supplemental_columns, |
31 fail_hard=False): | 30 fail_hard=False): |
32 """Takes data in the old log format, and sends it to the dashboard. | 31 """Send results to the Chrome Performance Dashboard. |
33 | 32 |
34 This function tries to send any data from the cache file (which contains any | 33 Try to send any data from the cache file (which contains any data that wasn't |
35 data that wasn't successfully sent in a previous run), as well as the data | 34 successfully sent in a previous run), as well as the data from the arguments |
36 from the arguments provided in this run. | 35 provided in this run. |
37 | 36 |
38 Args: | 37 Args: |
39 logs_dict: Map of log filename (which contains the chart name) to a list of | 38 logname: Summary log file name. Contains the chart name. |
40 log file lines. Each one of these lines should be valid JSON and should | 39 lines: List of log-file lines. Each line should be valid JSON, and should |
41 include the properties 'traces' and 'rev'. | 40 include the properties 'traces' and 'rev'. |
42 perf_id: A string such as 'linux-release'. This is the bot name used on | 41 system: A string such as 'linux-release', which comes from perf_id. This |
43 the dashboard. | 42 is used to identify the bot in the Chrome Performance Dashboard. |
44 test: Test suite name (Note: you can also provide nested subtests | 43 test_name: Test name, which will be used as the first part of the slash |
45 under the top-level test by separating names with a slash. | 44 -separated test path on the Dashboard. (Note: If there are no slashes |
46 url: Performance Dashboard URL. | 45 in this name, then this is the test suite name. If you want to have |
47 mastername: Buildbot master name, e.g. 'chromium.perf'. Note that this is | 46 nested tests under one test suite, you could use a slash here.) |
48 *not* necessarily the same as the "master name" used on the dashboard. | 47 url: Performance Dashboard URL (including scheme). |
49 This was previously incorrectly called the "master id". | 48 masterid: ID of buildbot master, e.g. 'chromium.perf' |
50 buildername: Builder name. | 49 buildername: Builder name, e.g. 'Linux QA Perf (1)' |
51 buildnumber: Build number as a string. | 50 buildnumber: Build number (a string containing the number). |
52 build_dir: Directory name, where the cache dir shall be. | 51 build_dir: Directory name, where the cache dir shall be. |
53 supplemental_columns: Dict of supplemental data to upload. | 52 supplemental_columns: Dict of supplemental data to upload. |
54 fail_hard: Whether a fatal error will cause this step of the buildbot | 53 fail_hard: Whether a fatal error will cause this step of the buildbot |
55 run to be annotated with "@@@STEP_EXCEPTION@@@". | 54 run to be annotated with "@@@STEP_EXCEPTION@@@". |
| 55 |
| 56 Returns: None |
56 """ | 57 """ |
57 new_results_lines = _GetResultsJson(logs_dict, perf_id, test, url, | 58 if not logname.endswith('-summary.dat'): |
58 mastername, buildername, buildnumber, | 59 return |
59 supplemental_columns) | 60 |
| 61 new_results_line = _GetResultsJson(logname, lines, system, test_name, url, |
| 62 masterid, buildername, buildnumber, |
| 63 supplemental_columns) |
60 # Write the new request line to the cache, in case of errors. | 64 # Write the new request line to the cache, in case of errors. |
61 cache_filename = _GetCacheFileName(build_dir) | 65 cache_filename = _GetCacheFileName(build_dir) |
62 cache = open(cache_filename, 'ab') | 66 cache = open(cache_filename, 'ab') |
63 for line in new_results_lines: | 67 cache.write('\n' + new_results_line) |
64 cache.write('\n' + line) | |
65 cache.close() | 68 cache.close() |
66 | 69 |
67 # Send all the results from this run and the previous cache to the dashboard. | 70 # Send all the results from this run and the previous cache to the dashboard. |
68 cache = open(cache_filename, 'rb') | 71 cache = open(cache_filename, 'rb') |
69 cache_lines = cache.readlines() | 72 cache_lines = cache.readlines() |
70 cache.close() | 73 cache.close() |
71 errors = [] | 74 errors = [] |
72 lines_to_retry = [] | 75 lines_to_retry = [] |
73 fatal_error = False | 76 fatal_error = False |
74 total_results = len(cache_lines) | 77 total_results = len(cache_lines) |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
112 cache_dir = os.path.join(os.path.abspath(build_dir), CACHE_DIR) | 115 cache_dir = os.path.join(os.path.abspath(build_dir), CACHE_DIR) |
113 if not os.path.exists(cache_dir): | 116 if not os.path.exists(cache_dir): |
114 os.makedirs(cache_dir) | 117 os.makedirs(cache_dir) |
115 cache_filename = os.path.join(cache_dir, CACHE_FILENAME) | 118 cache_filename = os.path.join(cache_dir, CACHE_FILENAME) |
116 if not os.path.exists(cache_filename): | 119 if not os.path.exists(cache_filename): |
117 # Create the file. | 120 # Create the file. |
118 open(cache_filename, 'wb').close() | 121 open(cache_filename, 'wb').close() |
119 return cache_filename | 122 return cache_filename |
120 | 123 |
121 | 124 |
122 def _GetResultsJson(logs_dict, perf_id, test_name, url, mastername, buildername, | 125 def _GetResultsJson(logname, lines, system, test_name, url, masterid, |
123 buildnumber, supplemental_columns): | 126 buildername, buildnumber, supplemental_columns): |
124 """Prepare JSON to send from the data in the given arguments. | 127 """Prepare JSON to send from the data in the given arguments. |
125 | 128 |
126 Args: | 129 Args: |
127 log_dict: A dictionary mapping summary log file names to lists of log-file | 130 logname: Summary log file name. |
128 lines. Each line is valid JSON which when parsed is a dictionary that | 131 lines: List of log-file lines. Each line is valid JSON which, when |
129 has the keys 'traces' and 'rev'. | 132 deserialized, is a dict containing the keys 'traces' and 'rev'. |
130 perf_id: A string such as 'linux-release'. | 133 system: A string such as 'linux-release', which comes from perf_id. |
131 test_name: Test name. | 134 test_name: Test name. |
132 url: Chrome Performance Dashboard URL. | 135 url: Chrome Performance Dashboard URL. |
133 mastername: Buildbot master name (this is lowercase with dots, and is not | 136 masterid: Buildbot master ID. |
134 necessarily the same as the "master" sent to the dashboard). | |
135 buildername: Builder name. | 137 buildername: Builder name. |
136 buildnumber: Build number. | 138 buildnumber: Build number. |
137 supplemental_columns: Dict of supplemental data to add. | 139 supplemental_columns: Dict of supplemental data to add. |
138 | 140 |
139 Returns: | 141 Returns: |
140 A list of JSON strings that shall be sent to the dashboard. | 142 JSON that shall be sent to the Chrome Performance Dashboard. |
141 """ | 143 """ |
142 results_to_add = [] | 144 results_to_add = [] |
143 # Note that this master string is not the same as "mastername"! | |
144 master = slave_utils.GetActiveMaster() | 145 master = slave_utils.GetActiveMaster() |
| 146 bot = system |
| 147 chart_name = logname.replace('-summary.dat', '') |
| 148 for line in lines: |
| 149 data = json.loads(line) |
| 150 revision, revision_columns = _RevisionNumberColumns(data, master) |
145 | 151 |
146 for logname, log in logs_dict.iteritems(): | 152 for (trace_name, trace_values) in data['traces'].iteritems(): |
147 if not logname.endswith('-summary.dat'): | 153 is_important = trace_name in data.get('important', []) |
148 continue | 154 test_path = _TestPath(test_name, chart_name, trace_name) |
149 lines = [str(l).rstrip() for l in log] | 155 result = { |
150 chart_name = logname.replace('-summary.dat', '') | 156 'master': master, |
| 157 'bot': system, |
| 158 'test': test_path, |
| 159 'revision': revision, |
| 160 'masterid': masterid, |
| 161 'buildername': buildername, |
| 162 'buildnumber': buildnumber, |
| 163 'supplemental_columns': {} |
| 164 } |
| 165 # Add the supplemental_columns values that were passed in after the |
| 166 # calculated revision column values so that these can be overwritten. |
| 167 result['supplemental_columns'].update(revision_columns) |
| 168 result['supplemental_columns'].update(supplemental_columns) |
| 169 # Test whether we have x/y data. |
| 170 have_multi_value_data = False |
| 171 for value in trace_values: |
| 172 if isinstance(value, list): |
| 173 have_multi_value_data = True |
| 174 if have_multi_value_data: |
| 175 result['data'] = trace_values |
| 176 else: |
| 177 result['value'] = trace_values[0] |
| 178 result['error'] = trace_values[1] |
151 | 179 |
152 for line in lines: | 180 if data.get('units'): |
153 data = json.loads(line) | 181 result['units'] = data['units'] |
154 revision, revision_columns = _RevisionNumberColumns(data, master) | 182 if data.get('units_x'): |
155 | 183 result['units_x'] = data['units_x'] |
156 for (trace_name, trace_values) in data['traces'].iteritems(): | 184 if data.get('stack'): |
157 is_important = trace_name in data.get('important', []) | 185 result['stack'] = data['stack'] |
158 test_path = _TestPath(test_name, chart_name, trace_name) | 186 if is_important: |
159 result = { | 187 result['important'] = True |
160 'master': master, | 188 results_to_add.append(result) |
161 'bot': perf_id, | 189 _PrintLinkStep(url, master, bot, test_name, revision) |
162 'test': test_path, | 190 return json.dumps(results_to_add) |
163 'revision': revision, | |
164 'masterid': mastername, | |
165 'buildername': buildername, | |
166 'buildnumber': buildnumber, | |
167 'supplemental_columns': {} | |
168 } | |
169 # Add the supplemental_columns values that were passed in after the | |
170 # calculated revision column values so that these can be overwritten. | |
171 result['supplemental_columns'].update(revision_columns) | |
172 result['supplemental_columns'].update(supplemental_columns) | |
173 # Test whether we have x/y data. | |
174 have_multi_value_data = False | |
175 for value in trace_values: | |
176 if isinstance(value, list): | |
177 have_multi_value_data = True | |
178 if have_multi_value_data: | |
179 result['data'] = trace_values | |
180 else: | |
181 result['value'] = trace_values[0] | |
182 result['error'] = trace_values[1] | |
183 | |
184 if data.get('units'): | |
185 result['units'] = data['units'] | |
186 if data.get('units_x'): | |
187 result['units_x'] = data['units_x'] | |
188 if is_important: | |
189 result['important'] = True | |
190 results_to_add.append(result) | |
191 | |
192 _PrintLinkStep(url, master, perf_id, test_name, revision) | |
193 | |
194 # It was experimentally determined that 512 points takes about 7.5 seconds | |
195 # to handle, and App Engine times out after about 60 seconds. | |
196 results_lists = _ChunkList(results_to_add, 500) | |
197 return map(json.dumps, results_lists) | |
198 | |
199 | |
200 def _ChunkList(items, chunk_size): | |
201 """Divides a list into a list of sublists no longer than the given size. | |
202 | |
203 Args: | |
204 items: The original list of items. Can be very long. | |
205 chunk_size: The maximum size of sublists in the results returned. | |
206 | |
207 Returns: | |
208 A list of sublists (which contain the original items, in order). | |
209 """ | |
210 chunks = [] | |
211 items_left = items[:] | |
212 while items_left: | |
213 chunks.append(items_left[:chunk_size]) | |
214 items_left = items_left[chunk_size:] | |
215 return chunks | |
216 | 191 |
217 | 192 |
218 def _RevisionNumberColumns(data, master): | 193 def _RevisionNumberColumns(data, master): |
219 """Get the revision number and revision-related columns from the given data. | 194 """Get the revision number and revision-related columns from the given data. |
220 | 195 |
221 Args: | 196 Args: |
222 data: A dict of information from one line of the log file. | 197 data: A dict of information from one line of the log file. |
223 master: The name of the buildbot master. | 198 master: The name of the buildbot master. |
224 | 199 |
225 Returns: | 200 Returns: |
(...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
331 system: A string such as 'linux-release', which comes from perf_id. | 306 system: A string such as 'linux-release', which comes from perf_id. |
332 test_path: Slash-separated test path, e.g. "moz/times" | 307 test_path: Slash-separated test path, e.g. "moz/times" |
333 revision: Revision number. | 308 revision: Revision number. |
334 """ | 309 """ |
335 results_link = url + RESULTS_LINK_PATH % ( | 310 results_link = url + RESULTS_LINK_PATH % ( |
336 urllib.quote(master), | 311 urllib.quote(master), |
337 urllib.quote(system), | 312 urllib.quote(system), |
338 urllib.quote(test_path), | 313 urllib.quote(test_path), |
339 revision) | 314 revision) |
340 print '@@@STEP_LINK@%s@%s@@@' % ('Results Dashboard', results_link) | 315 print '@@@STEP_LINK@%s@%s@@@' % ('Results Dashboard', results_link) |
OLD | NEW |