Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(568)

Side by Side Diff: scripts/slave/results_dashboard.py

Issue 217053012: Make results_dashboard send just one request per test run. (Closed) Base URL: https://chromium.googlesource.com/chromium/tools/build.git@master
Patch Set: Group results into lists of limited length. Created 6 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | scripts/slave/runtest.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. 2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """Functions for adding results to perf dashboard.""" 6 """Functions for adding results to the Performance Dashboard."""
7 7
8 import calendar 8 import calendar
9 import datetime 9 import datetime
10 import httplib 10 import httplib
11 import json 11 import json
12 import os 12 import os
13 import urllib 13 import urllib
14 import urllib2 14 import urllib2
15 15
16 from slave import slave_utils 16 from slave import slave_utils
17 17
18 # The paths in the results dashboard URLs for sending and viewing results. 18 # The paths in the results dashboard URLs for sending and viewing results.
19 SEND_RESULTS_PATH = '/add_point' 19 SEND_RESULTS_PATH = '/add_point'
20 RESULTS_LINK_PATH = '/report?masters=%s&bots=%s&tests=%s&rev=%s' 20 RESULTS_LINK_PATH = '/report?masters=%s&bots=%s&tests=%s&rev=%s'
21
21 # CACHE_DIR/CACHE_FILENAME will be created in options.build_dir to cache 22 # CACHE_DIR/CACHE_FILENAME will be created in options.build_dir to cache
22 # results which need to be retried. 23 # results which need to be retried.
23 CACHE_DIR = 'results_dashboard' 24 CACHE_DIR = 'results_dashboard'
24 CACHE_FILENAME = 'results_to_retry' 25 CACHE_FILENAME = 'results_to_retry'
25 26
26 27
27 #TODO(xusydoc): set fail_hard to True when bots stabilize. See crbug.com/222607. 28 #TODO(xusydoc): set fail_hard to True when bots stabilize. See crbug.com/222607.
28 def SendResults(logname, lines, system, test_name, url, masterid, 29 def SendResults(logs_dict, perf_id, test, url, mastername, buildername,
29 buildername, buildnumber, build_dir, supplemental_columns, 30 buildnumber, build_dir, supplemental_columns,
30 fail_hard=False): 31 fail_hard=False):
31 """Send results to the Chrome Performance Dashboard. 32 """Takes data in the old log format, and sends it to the dashboard.
32 33
33 Try to send any data from the cache file (which contains any data that wasn't 34 This function tries to send any data from the cache file (which contains any
34 successfully sent in a previous run), as well as the data from the arguments 35 data that wasn't successfully sent in a previous run), as well as the data
35 provided in this run. 36 from the arguments provided in this run.
36 37
37 Args: 38 Args:
38 logname: Summary log file name. Contains the chart name. 39 logs_dict: Map of log filename (which contains the chart name) to a list of
39 lines: List of log-file lines. Each line should be valid JSON, and should 40 log file lines. Each one of these lines should be valid JSON and should
40 include the properties 'traces' and 'rev'. 41 include the properties 'traces' and 'rev'.
41 system: A string such as 'linux-release', which comes from perf_id. This 42 perf_id: A string such as 'linux-release'. This is the bot name used on
42 is used to identify the bot in the Chrome Performance Dashboard. 43 the dashboard.
43 test_name: Test name, which will be used as the first part of the slash 44 test: Test suite name (Note: you can also provide nested subtests
44 -separated test path on the Dashboard. (Note: If there are no slashes 45 under the top-level test by separating names with a slash.)
45 in this name, then this is the test suite name. If you want to have 46 url: Performance Dashboard URL.
46 nested tests under one test suite, you could use a slash here.) 47 mastername: Buildbot master name, e.g. 'chromium.perf'. Note that this is
47 url: Performance Dashboard URL (including schema). 48 *not* necessarily the same as the "master name" used on the dashboard.
48 masterid: ID of buildbot master, e.g. 'chromium.perf' 49 This was previously incorrectly called the "master id".
49 buildername: Builder name, e.g. 'Linux QA Perf (1)' 50 buildername: Builder name.
50 buildnumber: Build number (a string containing the number). 51 buildnumber: Build number as a string.
51 build_dir: Directory name, where the cache dir shall be. 52 build_dir: Directory name, where the cache dir shall be.
52 supplemental_columns: Dict of supplemental data to upload. 53 supplemental_columns: Dict of supplemental data to upload.
53 fail_hard: Whether a fatal error will cause this step of the buildbot 54 fail_hard: Whether a fatal error will cause this step of the buildbot
54 run to be annotated with "@@@STEP_EXCEPTION@@@". 55 run to be annotated with "@@@STEP_EXCEPTION@@@".
55
56 Returns: None
57 """ 56 """
58 if not logname.endswith('-summary.dat'): 57 new_results_lines = _GetResultsJson(logs_dict, perf_id, test, url,
59 return 58 mastername, buildername, buildnumber,
60 59 supplemental_columns)
61 new_results_line = _GetResultsJson(logname, lines, system, test_name, url,
62 masterid, buildername, buildnumber,
63 supplemental_columns)
64 # Write the new request line to the cache, in case of errors. 60 # Write the new request line to the cache, in case of errors.
65 cache_filename = _GetCacheFileName(build_dir) 61 cache_filename = _GetCacheFileName(build_dir)
66 cache = open(cache_filename, 'ab') 62 cache = open(cache_filename, 'ab')
67 cache.write('\n' + new_results_line) 63 for line in new_results_lines:
64 cache.write('\n' + line)
68 cache.close() 65 cache.close()
69 66
70 # Send all the results from this run and the previous cache to the dashboard. 67 # Send all the results from this run and the previous cache to the dashboard.
71 cache = open(cache_filename, 'rb') 68 cache = open(cache_filename, 'rb')
72 cache_lines = cache.readlines() 69 cache_lines = cache.readlines()
73 cache.close() 70 cache.close()
74 errors = [] 71 errors = []
75 lines_to_retry = [] 72 lines_to_retry = []
76 fatal_error = False 73 fatal_error = False
77 total_results = len(cache_lines) 74 total_results = len(cache_lines)
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after
115 cache_dir = os.path.join(os.path.abspath(build_dir), CACHE_DIR) 112 cache_dir = os.path.join(os.path.abspath(build_dir), CACHE_DIR)
116 if not os.path.exists(cache_dir): 113 if not os.path.exists(cache_dir):
117 os.makedirs(cache_dir) 114 os.makedirs(cache_dir)
118 cache_filename = os.path.join(cache_dir, CACHE_FILENAME) 115 cache_filename = os.path.join(cache_dir, CACHE_FILENAME)
119 if not os.path.exists(cache_filename): 116 if not os.path.exists(cache_filename):
120 # Create the file. 117 # Create the file.
121 open(cache_filename, 'wb').close() 118 open(cache_filename, 'wb').close()
122 return cache_filename 119 return cache_filename
123 120
124 121
125 def _GetResultsJson(logname, lines, system, test_name, url, masterid, 122 def _GetResultsJson(logs_dict, perf_id, test_name, url, mastername, buildername,
126 buildername, buildnumber, supplemental_columns): 123 buildnumber, supplemental_columns):
127 """Prepare JSON to send from the data in the given arguments. 124 """Prepare JSON to send from the data in the given arguments.
128 125
129 Args: 126 Args:
130 logname: Summary log file name. 127 logs_dict: A dictionary mapping summary log file names to lists of log-file
131 lines: List of log-file lines. Each line is valid JSON which, when 128 lines. Each line is valid JSON which when parsed is a dictionary that
132 deserialized, is a dict containing the keys 'traces' and 'rev'. 129 has the keys 'traces' and 'rev'.
133 system: A string such as 'linux-release', which comes from perf_id. 130 perf_id: A string such as 'linux-release'.
134 test_name: Test name. 131 test_name: Test name.
135 url: Chrome Performance Dashboard URL. 132 url: Chrome Performance Dashboard URL.
136 masterid: Buildbot master ID. 133 mastername: Buildbot master name (this is lowercase with dots, and is not
134 necessarily the same as the "master" sent to the dashboard).
137 buildername: Builder name. 135 buildername: Builder name.
138 buildnumber: Build number. 136 buildnumber: Build number.
139 supplemental_columns: Dict of supplemental data to add. 137 supplemental_columns: Dict of supplemental data to add.
140 138
141 Returns: 139 Returns:
142 JSON that shall be sent to the Chrome Performance Dashboard. 140 A list of JSON strings that shall be sent to the dashboard.
143 """ 141 """
144 results_to_add = [] 142 results_to_add = []
143 # Note that this master string is not the same as "mastername"!
145 master = slave_utils.GetActiveMaster() 144 master = slave_utils.GetActiveMaster()
146 bot = system
147 chart_name = logname.replace('-summary.dat', '')
148 for line in lines:
149 data = json.loads(line)
150 revision, revision_columns = _RevisionNumberColumns(data, master)
151 145
152 for (trace_name, trace_values) in data['traces'].iteritems(): 146 for logname, log in logs_dict.iteritems():
153 is_important = trace_name in data.get('important', []) 147 if not logname.endswith('-summary.dat'):
154 test_path = _TestPath(test_name, chart_name, trace_name) 148 continue
155 result = { 149 lines = [str(l).rstrip() for l in log]
156 'master': master, 150 chart_name = logname.replace('-summary.dat', '')
157 'bot': system,
158 'test': test_path,
159 'revision': revision,
160 'masterid': masterid,
161 'buildername': buildername,
162 'buildnumber': buildnumber,
163 'supplemental_columns': {}
164 }
165 # Add the supplemental_columns values that were passed in after the
166 # calculated revision column values so that these can be overwritten.
167 result['supplemental_columns'].update(revision_columns)
168 result['supplemental_columns'].update(supplemental_columns)
169 # Test whether we have x/y data.
170 have_multi_value_data = False
171 for value in trace_values:
172 if isinstance(value, list):
173 have_multi_value_data = True
174 if have_multi_value_data:
175 result['data'] = trace_values
176 else:
177 result['value'] = trace_values[0]
178 result['error'] = trace_values[1]
179 151
180 if data.get('units'): 152 for line in lines:
181 result['units'] = data['units'] 153 data = json.loads(line)
182 if data.get('units_x'): 154 revision, revision_columns = _RevisionNumberColumns(data, master)
183 result['units_x'] = data['units_x'] 155
184 if data.get('stack'): 156 for (trace_name, trace_values) in data['traces'].iteritems():
185 result['stack'] = data['stack'] 157 is_important = trace_name in data.get('important', [])
186 if is_important: 158 test_path = _TestPath(test_name, chart_name, trace_name)
187 result['important'] = True 159 result = {
188 results_to_add.append(result) 160 'master': master,
189 _PrintLinkStep(url, master, bot, test_name, revision) 161 'bot': perf_id,
ghost stip (do not use) 2014/04/01 00:28:07 as far as I can tell, this old code worked because
190 return json.dumps(results_to_add) 162 'test': test_path,
163 'revision': revision,
164 'masterid': mastername,
165 'buildername': buildername,
166 'buildnumber': buildnumber,
167 'supplemental_columns': {}
168 }
169 # Add the supplemental_columns values that were passed in after the
170 # calculated revision column values so that these can be overwritten.
171 result['supplemental_columns'].update(revision_columns)
172 result['supplemental_columns'].update(supplemental_columns)
173 # Test whether we have x/y data.
174 have_multi_value_data = False
175 for value in trace_values:
176 if isinstance(value, list):
177 have_multi_value_data = True
178 if have_multi_value_data:
179 result['data'] = trace_values
180 else:
181 result['value'] = trace_values[0]
182 result['error'] = trace_values[1]
183
184 if data.get('units'):
185 result['units'] = data['units']
186 if data.get('units_x'):
187 result['units_x'] = data['units_x']
188 if is_important:
189 result['important'] = True
190 results_to_add.append(result)
191
192 _PrintLinkStep(url, master, perf_id, test_name, revision)
ghost stip (do not use) 2014/04/01 00:28:07 add a check here for `if logsdict`. my guess is th
qyearsley 2014/04/01 01:14:24 Alright, so I should add a check here (and also a
193
194 # It was experimentally determined that 512 points take about 7.5 seconds
195 # to handle, and App Engine times out after about 60 seconds.
196 results_lists = _ChunkList(results_to_add, 500)
197 return map(json.dumps, results_lists)
198
199
200 def _ChunkList(items, chunk_size):
201 """Divides a list into a list of sublists no longer than the given size.
202
203 Args:
204 items: The original list of items. Can be very long.
205 chunk_size: The maximum size of sublists in the results returned.
206
207 Returns:
208 A list of sublists (which contain the original items, in order).
209 """
210 chunks = []
211 items_left = items[:]
212 while items_left:
213 chunks.append(items_left[:chunk_size])
214 items_left = items_left[chunk_size:]
215 return chunks
191 216
192 217
193 def _RevisionNumberColumns(data, master): 218 def _RevisionNumberColumns(data, master):
194 """Get the revision number and revision-related columns from the given data. 219 """Get the revision number and revision-related columns from the given data.
195 220
196 Args: 221 Args:
197 data: A dict of information from one line of the log file. 222 data: A dict of information from one line of the log file.
198 master: The name of the buildbot master. 223 master: The name of the buildbot master.
199 224
200 Returns: 225 Returns:
(...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after
306 system: A string such as 'linux-release', which comes from perf_id. 331 system: A string such as 'linux-release', which comes from perf_id.
307 test_path: Slash-separated test path, e.g. "moz/times" 332 test_path: Slash-separated test path, e.g. "moz/times"
308 revision: Revision number. 333 revision: Revision number.
309 """ 334 """
310 results_link = url + RESULTS_LINK_PATH % ( 335 results_link = url + RESULTS_LINK_PATH % (
311 urllib.quote(master), 336 urllib.quote(master),
312 urllib.quote(system), 337 urllib.quote(system),
313 urllib.quote(test_path), 338 urllib.quote(test_path),
314 revision) 339 revision)
315 print '@@@STEP_LINK@%s@%s@@@' % ('Results Dashboard', results_link) 340 print '@@@STEP_LINK@%s@%s@@@' % ('Results Dashboard', results_link)
OLDNEW
« no previous file with comments | « no previous file | scripts/slave/runtest.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698