#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Script to parse perf data from Chrome Endure test executions, to be graphed.

This script connects via HTTP to a buildbot master in order to scrape and parse
perf data from Chrome Endure tests that have been run. The perf data is then
stored in local text files to be graphed by the Chrome Endure graphing code.

It is assumed that any Chrome Endure tests that show up on the waterfall have
names of the following form:

"endure_<webapp_name>-<test_name>"

This script accepts either a URL or a local path as the buildbot location, and
its behavior depends on which is given.

When a URL is given, buildbot logs are fetched from the buildbot builders URL,
e.g. http://build.chromium.org/p/chromium.endure/builders/.

When a local path is given, buildbot logs are read from buildbot's internal
files in that directory, e.g. /home/chrome-bot/buildbot.
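
Example invocation (illustrative; assumes this script is saved as
endure_parser.py):
  ./endure_parser.py --buildbot /home/chrome-bot/buildbot \
      --graph /home/chrome-bot/www/chrome_endure_clean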
25 """ | |
26 | |
27 import cPickle | |
28 import getpass | |
29 import logging | |
30 import optparse | |
31 import os | |
32 import re | |
33 import simplejson | |
34 import socket | |
35 import string | |
36 import sys | |
37 import time | |
38 import urllib | |
39 import urllib2 | |
40 | |
41 | |
42 CHROME_ENDURE_SLAVE_NAMES = [ | |
43 'Linux QA Perf (0)', | |
44 'Linux QA Perf (1)', | |
45 'Linux QA Perf (2)', | |
46 'Linux QA Perf (3)', | |
47 'Linux QA Perf (4)', | |
48 'Linux QA Perf (dbg)(0)', | |
49 'Linux QA Perf (dbg)(1)', | |
50 'Linux QA Perf (dbg)(2)', | |
51 'Linux QA Perf (dbg)(3)', | |
52 'Linux QA Perf (dbg)(4)', | |
53 ] | |
54 | |
55 BUILDER_URL_BASE = 'http://build.chromium.org/p/chromium.endure/builders/' | |
56 LAST_BUILD_NUM_PROCESSED_FILE = os.path.join(os.path.dirname(__file__), | |
57 '_parser_last_processed.txt') | |
58 LOCAL_GRAPH_DIR = '/home/%s/www/chrome_endure_clean' % getpass.getuser() | |
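# Translation table for mapping a slave name to its on-disk directory name:
# spaces and parentheses become underscores,
# e.g. 'Linux QA Perf (0)' -> 'Linux_QA_Perf__0_'.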
MANGLE_TRANSLATION = string.maketrans(' ()', '___')


def SetupBaseGraphDirIfNeeded(webapp_name, test_name, dest_dir):
  """Sets up the directory containing results for a particular test, if needed.

  Args:
    webapp_name: The string name of the webapp associated with the given test.
    test_name: The string name of the test.
    dest_dir: The name of the destination directory that needs to be set up.
  """
  if not os.path.exists(dest_dir):
    os.mkdir(dest_dir)  # Test name directory.
    os.chmod(dest_dir, 0755)

  # Create config file.
  config_file = os.path.join(dest_dir, 'config.js')
  if not os.path.exists(config_file):
    with open(config_file, 'w') as f:
      f.write('var Config = {\n')
      f.write('buildslave: "Chrome Endure Bots",\n')
      f.write('title: "Chrome Endure %s Test: %s",\n' % (webapp_name.upper(),
                                                         test_name))
      f.write('};\n')
    os.chmod(config_file, 0755)

  # Set up symbolic links to the real graphing files.
  link_file = os.path.join(dest_dir, 'index.html')
  if not os.path.exists(link_file):
    os.symlink('../../endure_plotter.html', link_file)
  link_file = os.path.join(dest_dir, 'endure_plotter.js')
  if not os.path.exists(link_file):
    os.symlink('../../endure_plotter.js', link_file)
  link_file = os.path.join(dest_dir, 'js')
  if not os.path.exists(link_file):
    os.symlink('../../js', link_file)


def WriteToDataFile(new_line, existing_lines, revision, data_file):
  """Writes a new entry to an existing perf data file to be graphed.

  If there's an existing line with the same revision number, overwrite its data
  with the new line. Otherwise, prepend the info for the new revision.

  Args:
    new_line: A dictionary representing perf information for the new entry.
    existing_lines: A list of string lines from the existing perf data file.
    revision: The string revision number associated with the new perf entry.
    data_file: The string name of the perf data file to which to write.
  """
  overwritten = False
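  # Data files are ordered newest revision first, so stop scanning once a
  # revision older than the new one is seen; if no match was found, the new
  # line is prepended below.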
  for i, line in enumerate(existing_lines):
    line_dict = simplejson.loads(line)
    if line_dict['rev'] == revision:
      existing_lines[i] = simplejson.dumps(new_line)
      overwritten = True
      break
    elif int(line_dict['rev']) < int(revision):
      break
  if not overwritten:
    existing_lines.insert(0, simplejson.dumps(new_line))

  with open(data_file, 'w') as f:
    f.write('\n'.join(existing_lines))
  os.chmod(data_file, 0755)


def OutputPerfData(revision, graph_name, values, units, units_x, dest_dir,
                   is_stacked=False, stack_order=[]):
  """Outputs perf data to a local text file to be graphed.

  Args:
    revision: The string revision number associated with the perf data.
    graph_name: The string name of the graph on which to plot the data.
    values: A dict which maps a description to a value. A value is either a
            single data value to be graphed, or a list of 2-tuples
            representing (x, y) points to be graphed for long-running tests.
    units: The string description for the y-axis units on the graph.
    units_x: The string description for the x-axis units on the graph. Should
             be set to None if the results are not for long-running graphs.
    dest_dir: The name of the destination directory to which to write.
    is_stacked: True to draw a "stacked" graph. By default, values are
                stacked from the bottom in the order they first appear.
    stack_order: A list of key strings specifying the order in which to stack
                 values in the graph.
  """
  # Update graphs.dat, which contains metadata associated with each graph.
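  # graphs.dat holds a JSON list of per-graph metadata, e.g. (illustrative):
  #   [{"name": "TotalVM", "units": "MB", "important": false}]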
  existing_graphs = []
  graphs_file = os.path.join(dest_dir, 'graphs.dat')
  if os.path.exists(graphs_file):
    with open(graphs_file, 'r') as f:
      existing_graphs = simplejson.loads(f.read())
  is_new_graph = True
  for graph in existing_graphs:
    if graph['name'] == graph_name:
      is_new_graph = False
      break
  if is_new_graph:
    new_graph = {
      'name': graph_name,
      'units': units,
      'important': False,
    }
    if units_x:
      new_graph['units_x'] = units_x
    existing_graphs.append(new_graph)
    existing_graphs = sorted(existing_graphs, key=lambda x: x['name'])
    with open(graphs_file, 'w') as f:
      f.write(simplejson.dumps(existing_graphs, indent=2))
    os.chmod(graphs_file, 0755)

  # Update summary data file, containing the actual data to be graphed.
  data_file_name = graph_name + '-summary.dat'
  existing_lines = []
  data_file = os.path.join(dest_dir, data_file_name)
  if os.path.exists(data_file):
    with open(data_file, 'r') as f:
      existing_lines = f.readlines()
    existing_lines = map(lambda x: x.strip(), existing_lines)
  new_traces = {}
  for description in values:
    value = values[description]
    if units_x:
      points = []
      for point in value:
        points.append([str(point[0]), str(point[1])])
      new_traces[description] = points
    else:
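      # A short-running result is stored as a [value, 0.0] pair; the second
      # slot (standard deviation in the usual perf dashboard format) is not
      # tracked by this script.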
      new_traces[description] = [str(value), str(0.0)]
  new_line = {
    'traces': new_traces,
    'rev': revision
  }
  if is_stacked:
    new_line['stack'] = True
    new_line['stack_order'] = stack_order

  WriteToDataFile(new_line, existing_lines, revision, data_file)


def OutputEventData(revision, event_dict, dest_dir):
  """Outputs event data to a local text file to be graphed.

  Args:
    revision: The string revision number associated with the event data.
    event_dict: A dict which maps a description to an array of tuples
                representing event data to be graphed.
    dest_dir: The name of the destination directory to which to write.
  """
  data_file_name = '_EVENT_-summary.dat'
  existing_lines = []
  data_file = os.path.join(dest_dir, data_file_name)
  if os.path.exists(data_file):
    with open(data_file, 'r') as f:
      existing_lines = f.readlines()
    existing_lines = map(lambda x: x.strip(), existing_lines)

  new_events = {}
  for description in event_dict:
    event_list = event_dict[description]
    value_list = []
    for event_time, event_data in event_list:
      value_list.append([str(event_time), event_data])
    new_events[description] = value_list

  new_line = {
    'rev': revision,
    'events': new_events
  }

  WriteToDataFile(new_line, existing_lines, revision, data_file)


def UpdatePerfDataFromFetchedContent(
    revision, content, webapp_name, test_name, graph_dir, only_dmp=False):
  """Updates perf data from fetched stdio data.

  Args:
    revision: The string revision number associated with the new perf entry.
    content: Fetched stdio data.
    webapp_name: The name of the webapp.
    test_name: The name of the test.
    graph_dir: A path to the graph directory.
    only_dmp: True if only Deep Memory Profiler results should be used.
  """
  perf_data_raw = []

  def AppendRawPerfData(graph_name, description, value, units, units_x,
                        webapp_name, test_name, is_stacked=False):
    perf_data_raw.append({
      'graph_name': graph_name,
      'description': description,
      'value': value,
      'units': units,
      'units_x': units_x,
      'webapp_name': webapp_name,
      'test_name': test_name,
      'stack': is_stacked,
    })

  # First scan for short-running perf test results.
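  # Such lines look like, e.g. (values illustrative):
  #   RESULT vm_final_browser: vm_final_browser= 253.5 MB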
  for match in re.findall(
      r'RESULT ([^:]+): ([^=]+)= ([-\d\.]+) (\S+)', content):
    if (not only_dmp) or match[0].endswith('-DMP'):
      try:
        match2 = eval(match[2])
      except SyntaxError:
        match2 = None
      if match2:
        AppendRawPerfData(match[0], match[1], match2, match[3], None,
                          webapp_name, webapp_name)

  # Next scan for long-running perf test results.
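  # Such lines carry a list of (x, y) points plus y-axis and x-axis units,
  # e.g. (values illustrative):
  #   RESULT total_dom_nodes: nodes= [(60.0, 150), (120.0, 152)] nodes seconds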
  for match in re.findall(
      r'RESULT ([^:]+): ([^=]+)= (\[[^\]]+\]) (\S+) (\S+)', content):
    if (not only_dmp) or match[0].endswith('-DMP'):
      try:
        match2 = eval(match[2])
      except SyntaxError:
        match2 = None
      # TODO(dmikurube): Change the condition to use stacked graph when we
      # determine how to specify it.
      if match2:
        AppendRawPerfData(match[0], match[1], match2, match[3], match[4],
                          webapp_name, test_name, match[0].endswith('-DMP'))

  # Next scan for events in the test results.
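  # Event lines carry a list of (time, data) tuples, e.g. (illustrative):
  #   RESULT _EVENT_: GarbageCollect= [(60.0, {'val': 1})]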
  for match in re.findall(
      r'RESULT _EVENT_: ([^=]+)= (\[[^\]]+\])', content):
    try:
      match1 = eval(match[1])
    except SyntaxError:
      match1 = None
    if match1:
      AppendRawPerfData('_EVENT_', match[0], match1, None, None,
                        webapp_name, test_name)

  # For each graph_name/description pair that refers to a long-running test
  # result or an event, concatenate all the results together (assume results
  # in the input file are in the correct order). For short-running test
  # results, keep just one if more than one is specified.
  perf_data = {}  # Maps a graph-line key to a perf data dictionary.
  for data in perf_data_raw:
    key_graph = data['graph_name']
    key_description = data['description']
    if key_graph not in perf_data:
      perf_data[key_graph] = {
        'graph_name': data['graph_name'],
        'value': {},
        'units': data['units'],
        'units_x': data['units_x'],
        'webapp_name': data['webapp_name'],
        'test_name': data['test_name'],
      }
    perf_data[key_graph]['stack'] = data['stack']
    if 'stack_order' not in perf_data[key_graph]:
      perf_data[key_graph]['stack_order'] = []
    if (data['stack'] and
        data['description'] not in perf_data[key_graph]['stack_order']):
      perf_data[key_graph]['stack_order'].append(data['description'])

    if data['graph_name'] != '_EVENT_' and not data['units_x']:
      # Short-running test result.
      perf_data[key_graph]['value'][key_description] = data['value']
    else:
      # Long-running test result or event.
      if key_description in perf_data[key_graph]['value']:
        perf_data[key_graph]['value'][key_description] += data['value']
      else:
        perf_data[key_graph]['value'][key_description] = data['value']

  # Finally, for each graph-line in |perf_data|, update the associated local
  # graph data files if necessary.
  for perf_data_key in perf_data:
    perf_data_dict = perf_data[perf_data_key]

    dest_dir = os.path.join(graph_dir, perf_data_dict['webapp_name'])
    if not os.path.exists(dest_dir):
      os.mkdir(dest_dir)  # Webapp name directory.
      os.chmod(dest_dir, 0755)
    dest_dir = os.path.join(dest_dir, perf_data_dict['test_name'])

    SetupBaseGraphDirIfNeeded(perf_data_dict['webapp_name'],
                              perf_data_dict['test_name'], dest_dir)
    if perf_data_dict['graph_name'] == '_EVENT_':
      OutputEventData(revision, perf_data_dict['value'], dest_dir)
    else:
      OutputPerfData(revision, perf_data_dict['graph_name'],
                     perf_data_dict['value'],
                     perf_data_dict['units'], perf_data_dict['units_x'],
                     dest_dir,
                     perf_data_dict['stack'], perf_data_dict['stack_order'])


def SlaveLocation(master_location, slave_info):
  """Returns the slave location for |master_location| and |slave_info|."""
  if master_location.startswith('http://'):
    return master_location + urllib.quote(slave_info['slave_name'])
  else:
    return os.path.join(master_location,
                        slave_info['slave_name'].translate(MANGLE_TRANSLATION))


def GetRevisionAndLogs(slave_location, build_num):
  """Gets a revision number and log locations.

  Args:
    slave_location: A URL or a path to the build slave data.
    build_num: A build number.

  Returns:
    A pair of the revision number and a list of strings that contain locations
    of logs, or (False, []) in case of error.
  """
  if slave_location.startswith('http://'):
    location = slave_location + '/builds/' + str(build_num)
  else:
    location = os.path.join(slave_location, str(build_num))

  revision = False
  logs = []
  fp = None
  try:
    if location.startswith('http://'):
      fp = urllib2.urlopen(location)
      contents = fp.read()
      revisions = re.findall(r'<td class="left">got_revision</td>\s+'
                             r'<td>(\d+)</td>\s+<td>Source</td>', contents)
      if revisions:
        revision = revisions[0]
      logs = [location + link + '/text' for link
              in re.findall(r'(/steps/endure[^/]+/logs/stdio)', contents)]
    else:
      fp = open(location, 'rb')
      build = cPickle.load(fp)
      properties = build.getProperties()
      if properties.has_key('got_revision'):
        revision = build.getProperty('got_revision')
      candidates = os.listdir(slave_location)
      logs = [os.path.join(slave_location, filename)
              for filename in candidates
              if re.match(r'%d-log-endure[^/]+-stdio' % build_num, filename)]

  except urllib2.URLError, e:
    logging.exception('Error reading build URL "%s": %s', location, str(e))
    return False, []
  except (IOError, OSError), e:
    logging.exception('Error reading build file "%s": %s', location, str(e))
    return False, []
  finally:
    if fp:
      fp.close()

  return revision, logs


def ExtractTestNames(log_location, is_dbg):
  """Extracts test names from |log_location|.

  Returns:
    A dict containing the log location, the webapp name, and the test name,
    or False on error.
  """
  if log_location.startswith('http://'):
    location = urllib.unquote(log_location)
    test_pattern = r'endure_([^_]+)(_test |-)([^/]+)/'
  else:
    location = log_location
    test_pattern = r'endure_([^_]+)(_test_|-)([^/]+)-stdio'

  match = re.findall(test_pattern, location)
  if not match:
    logging.error('Test name not in expected format: %s', location)
    return False

  match = match[0]
  webapp_name = match[0]
  webapp_name = webapp_name + '_dbg' if is_dbg else webapp_name
  test_name = match[2]

  return {
    'location': log_location,
    'webapp_name': webapp_name,
    'test_name': test_name,
  }


def GetStdioContents(stdio_location):
  """Gets the contents of the given stdio log.

  Returns:
    A string containing the stdio log contents, or None in case of error.
  """
  fp = None
  contents = ''
  try:
    if stdio_location.startswith('http://'):
      fp = urllib2.urlopen(stdio_location, timeout=60)
      # Since in-progress test output is sent chunked, there's no EOF. We need
      # to specially handle this case so we don't hang here waiting for the
      # test to complete.
      start_time = time.time()
      while True:
        data = fp.read(1024)
        if not data:
          break
        contents += data
        if time.time() - start_time >= 30:  # Read for at most 30 seconds.
          break
    else:
      fp = open(stdio_location)
      data = fp.read()
      contents = ''
      index = 0

      # Buildbot log files are stored in the netstring format.
      # http://en.wikipedia.org/wiki/Netstring
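      # Each netstring is '<length>:<payload>,', and the first character of
      # the payload is the buildbot channel digit (0 = stdout, 1 = stderr,
      # 2 = header); only stdout payloads are kept below.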
      while index < len(data):
        index2 = index
        while data[index2].isdigit():
          index2 += 1
        if data[index2] != ':':
          logging.error('Log file is not in expected format: %s' %
                        stdio_location)
          contents = None
          break
        length = int(data[index:index2])
        index = index2 + 1
        channel = int(data[index])
        index += 1
        if data[index+length-1] != ',':
          logging.error('Log file is not in expected format: %s' %
                        stdio_location)
          contents = None
          break
        if channel == 0:
          contents += data[index:(index+length-1)]
        index += length

  except (urllib2.URLError, socket.error, IOError, OSError), e:
    # Issue warning but continue to the next stdio link.
    logging.warning('Error reading test stdio data "%s": %s',
                    stdio_location, str(e))
  finally:
    if fp:
      fp.close()

  return contents


def UpdatePerfDataForSlaveAndBuild(
    slave_info, build_num, graph_dir, master_location):
  """Processes updated perf data for a particular slave and build number.

  Args:
    slave_info: A dictionary containing information about the slave to process.
    build_num: The particular build number on the slave to process.
    graph_dir: A path to the graph directory.
    master_location: A URL or a path to the build master data.

  Returns:
    True if the perf data for the given slave/build is updated properly, or
    False if any critical error occurred.
  """
  if not master_location.startswith('http://'):
    # Source is a file.
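    # This import is needed for its side effect: the buildbot classes must be
    # importable for cPickle to unpickle the build objects read in
    # GetRevisionAndLogs().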
    from buildbot.status import builder

  slave_location = SlaveLocation(master_location, slave_info)
  logging.debug(' %s, build %d.', slave_info['slave_name'], build_num)
  is_dbg = '(dbg)' in slave_info['slave_name']

  revision, logs = GetRevisionAndLogs(slave_location, build_num)
  if not revision:
    return False

  stdios = []
  for log_location in logs:
    stdio = ExtractTestNames(log_location, is_dbg)
    if not stdio:
      return False
    stdios.append(stdio)

  for stdio in stdios:
    stdio_location = stdio['location']
    contents = GetStdioContents(stdio_location)

    if contents:
      UpdatePerfDataFromFetchedContent(revision, contents,
                                       stdio['webapp_name'],
                                       stdio['test_name'],
                                       graph_dir, is_dbg)

  return True


def GetMostRecentBuildNum(master_location, slave_name):
549 """Gets the most recent buld number for |slave_name| in |master_location|.""" | |
  most_recent_build_num = None

  if master_location.startswith('http://'):
    slave_url = master_location + urllib.quote(slave_name)

    url_contents = ''
    fp = None
    try:
      fp = urllib2.urlopen(slave_url, timeout=60)
      url_contents = fp.read()
    except urllib2.URLError, e:
      logging.exception('Error reading builder URL: %s', str(e))
      return None
    finally:
      if fp:
        fp.close()

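    # A build in progress exposes a '<build_num>/stop' form on the builder
    # page; prefer that build number, otherwise fall back to the numbered
    # links for completed builds.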
    matches = re.findall(r'/(\d+)/stop', url_contents)
    if matches:
      most_recent_build_num = int(matches[0])
    else:
      matches = re.findall(r'#(\d+)</a></td>', url_contents)
      if matches:
        most_recent_build_num = sorted(map(int, matches), reverse=True)[0]

  else:
    slave_path = os.path.join(master_location,
                              slave_name.translate(MANGLE_TRANSLATION))
    files = os.listdir(slave_path)
    number_files = [int(filename) for filename in files if filename.isdigit()]
    if number_files:
      most_recent_build_num = sorted(number_files, reverse=True)[0]

  if most_recent_build_num:
    logging.debug('%s most recent build number: %s',
                  slave_name, most_recent_build_num)
  else:
    logging.error('Could not identify latest build number for slave %s.',
                  slave_name)

  return most_recent_build_num


def UpdatePerfDataFiles(graph_dir, master_location):
  """Updates the Chrome Endure graph data files with the latest test results.

  For each known Chrome Endure slave, we scan its latest test results looking
  for any new test data. Any new data that is found is then appended to the
  data files used to display the Chrome Endure graphs.

  Args:
    graph_dir: A path to the graph directory.
    master_location: A URL or a path to the build master data.

  Returns:
    True if all graph data files are updated properly, or False if any error
    occurred.
  """
  slave_list = []
  for slave_name in CHROME_ENDURE_SLAVE_NAMES:
    slave_info = {}
    slave_info['slave_name'] = slave_name
    slave_info['most_recent_build_num'] = None
    slave_info['last_processed_build_num'] = None
    slave_list.append(slave_info)

  # Identify the most recent build number for each slave.
  logging.debug('Searching for latest build numbers for each slave...')
  for slave in slave_list:
    slave_name = slave['slave_name']
    slave['most_recent_build_num'] = GetMostRecentBuildNum(
        master_location, slave_name)

  # Identify the last-processed build number for each slave.
  logging.debug('Identifying last processed build numbers...')
  if not os.path.exists(LAST_BUILD_NUM_PROCESSED_FILE):
    for slave_info in slave_list:
      slave_info['last_processed_build_num'] = 0
  else:
    with open(LAST_BUILD_NUM_PROCESSED_FILE, 'r') as fp:
      file_contents = fp.read()
      for match in re.findall(r'([^:]+):(\d+)', file_contents):
        slave_name = match[0].strip()
        last_processed_build_num = match[1].strip()
        for slave_info in slave_list:
          if slave_info['slave_name'] == slave_name:
            slave_info['last_processed_build_num'] = int(
                last_processed_build_num)
    for slave_info in slave_list:
      if not slave_info['last_processed_build_num']:
        slave_info['last_processed_build_num'] = 0
  logging.debug('Done identifying last processed build numbers.')

  # For each Chrome Endure slave, process each build in-between the last
  # processed build num and the most recent build num, inclusive. To process
  # each one, first get the revision number for that build, then scan the test
  # result stdio for any performance data, and add any new performance data to
  # local files to be graphed.
  for slave_info in slave_list:
    logging.debug('Processing %s, builds %d-%d...',
                  slave_info['slave_name'],
                  slave_info['last_processed_build_num'],
                  slave_info['most_recent_build_num'])
    curr_build_num = slave_info['last_processed_build_num']
    while curr_build_num <= slave_info['most_recent_build_num']:
      if not UpdatePerfDataForSlaveAndBuild(slave_info, curr_build_num,
                                            graph_dir, master_location):
        # Do not give up; buildbot may have already purged the logs for the
        # earliest builds.
        logging.warning('Logs do not exist in buildbot for #%d of %s.' %
                        (curr_build_num, slave_info['slave_name']))
      curr_build_num += 1

  # Log the newly-processed build numbers.
  logging.debug('Logging the newly-processed build numbers...')
  with open(LAST_BUILD_NUM_PROCESSED_FILE, 'w') as f:
    for slave_info in slave_list:
      f.write('%s:%s\n' % (slave_info['slave_name'],
                           slave_info['most_recent_build_num']))

  return True


def GenerateIndexPage(graph_dir):
  """Generates a summary (landing) page for the Chrome Endure graphs.

  Args:
    graph_dir: A path to the graph directory.
  """
  logging.debug('Generating new index.html page...')

  # Page header.
  page = """
<html>

<head>
  <title>Chrome Endure Overview</title>
  <script language="javascript">
    function DisplayGraph(name, graph) {
      document.write(
          '<td><iframe scrolling="no" height="438" width="700" src="');
      document.write(name);
      document.write('"></iframe></td>');
    }
  </script>
</head>

<body>
<center>

<h1>
Chrome Endure
</h1>
"""
  # Print current time.
  page += '<p>Updated: %s</p>\n' % (
      time.strftime('%A, %B %d, %Y at %I:%M:%S %p %Z'))

  # Links for each webapp.
  webapp_names = [x for x in os.listdir(graph_dir) if
                  x not in ['js', 'old_data', '.svn', '.git'] and
                  os.path.isdir(os.path.join(graph_dir, x))]
  webapp_names = sorted(webapp_names)

  page += '<p> ['
  for i, name in enumerate(webapp_names):
    page += '<a href="#%s">%s</a>' % (name.upper(), name.upper())
    if i < len(webapp_names) - 1:
      page += ' | '
  page += '] </p>\n'

  # Print out the data for each webapp.
  for webapp_name in webapp_names:
    page += '\n<h1 id="%s">%s</h1>\n' % (webapp_name.upper(),
                                         webapp_name.upper())

    # Links for each test for this webapp.
    test_names = [x for x in
                  os.listdir(os.path.join(graph_dir, webapp_name))]
    test_names = sorted(test_names)

    page += '<p> ['
    for i, name in enumerate(test_names):
      page += '<a href="#%s">%s</a>' % (name, name)
      if i < len(test_names) - 1:
        page += ' | '
    page += '] </p>\n'

    # Print out the data for each test for this webapp.
    for test_name in test_names:
      # Get the set of graph names for this test.
      graph_names = [x[:x.find('-summary.dat')] for x in
                     os.listdir(os.path.join(graph_dir,
                                             webapp_name, test_name))
                     if '-summary.dat' in x and '_EVENT_' not in x]
      graph_names = sorted(graph_names)

      page += '<h2 id="%s">%s</h2>\n' % (test_name, test_name)
      page += '<table>\n'

      for i, graph_name in enumerate(graph_names):
        if i % 2 == 0:
          page += '  <tr>\n'
        page += ('  <script>DisplayGraph("%s/%s?graph=%s&lookout=1");'
                 '</script>\n' % (webapp_name, test_name, graph_name))
        if i % 2 == 1:
          page += '  </tr>\n'
      if len(graph_names) % 2 == 1:
        page += '  </tr>\n'
      page += '</table>\n'

  # Page footer.
  page += """
</center>
</body>

</html>
"""

  index_file = os.path.join(graph_dir, 'index.html')
  with open(index_file, 'w') as f:
    f.write(page)
  os.chmod(index_file, 0755)


def main():
  parser = optparse.OptionParser()
  parser.add_option(
      '-v', '--verbose', action='store_true', default=False,
      help='Use verbose logging.')
  parser.add_option(
      '-s', '--stdin', action='store_true', default=False,
      help='Read test output from stdin instead of from the slaves (for '
           'testing this script).')
  parser.add_option(
      '-b', '--buildbot', dest='buildbot', metavar="BUILDBOT",
      default=BUILDER_URL_BASE,
      help='Use log files in a buildbot at BUILDBOT. BUILDBOT can be a '
           'buildbot\'s builder URL or a local path to a buildbot directory. '
           'Both absolute and relative paths are accepted, e.g. '
           '"/home/chrome-bot/buildbot" or "../buildbot". '
           '[default: %default]')
  parser.add_option(
      '-g', '--graph', dest='graph_dir', metavar="DIR",
      default=LOCAL_GRAPH_DIR,
      help='Output graph data files to DIR. [default: %default]')
  options, _ = parser.parse_args()

  logging_level = logging.DEBUG if options.verbose else logging.INFO
  logging.basicConfig(level=logging_level,
                      format='[%(asctime)s] %(levelname)s: %(message)s')

  if options.stdin:
    content = sys.stdin.read()
    UpdatePerfDataFromFetchedContent(
        '12345', content, 'webapp', 'test', options.graph_dir)
  else:
    if options.buildbot.startswith('http://'):
      master_location = options.buildbot
    else:
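      # Make the buildbot checkout's bundled buildbot and twisted packages
      # importable, so that pickled build objects can be loaded.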
      build_dir = os.path.join(options.buildbot, 'build')
      third_party_dir = os.path.join(build_dir, 'third_party')
      sys.path.append(third_party_dir)
      sys.path.append(os.path.join(third_party_dir, 'buildbot_8_4p1'))
      sys.path.append(os.path.join(third_party_dir, 'twisted_10_2'))
      master_location = os.path.join(build_dir, 'masters',
                                     'master.chromium.endure')
    success = UpdatePerfDataFiles(options.graph_dir, master_location)
    if not success:
      logging.error('Failed to update perf data files.')
      sys.exit(1)

  GenerateIndexPage(options.graph_dir)
  logging.debug('All done!')


if __name__ == '__main__':
  main()