#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Script to parse perf data from Chrome Endure test executions, to be graphed.

This script connects via HTTP to a buildbot master in order to scrape and parse
perf data from Chrome Endure tests that have been run. The perf data is then
stored in local text files to be graphed by the Chrome Endure graphing code.

It is assumed that any Chrome Endure tests that show up on the waterfall have
names that are of the following form:

"endure_<webapp_name>_test <test_name>" (non-Web Page Replay tests)

or

"endure_<webapp_name>_wpr_test <test_name>" (Web Page Replay tests)

For example: "endure_gmail_wpr_test testGmailComposeDiscard"
"""

import getpass
import logging
import optparse
import os
import re
import simplejson
import socket
import sys
import time
import urllib
import urllib2


CHROME_ENDURE_SLAVE_NAMES = [
  'Linux (perf0)',
  'Linux (perf1)',
  'Linux (perf2)',
  'Linux (perf3)',
  'Linux (perf4)',
]

BUILDER_URL_BASE = 'http://chromegw.corp.google.com/i/chromium.pyauto/builders/'
LAST_BUILD_NUM_PROCESSED_FILE = os.path.join(os.path.dirname(__file__),
                                             '_parser_last_processed.txt')
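# Parsed perf data is written beneath this directory, laid out as
# <LOCAL_GRAPH_DIR>/<webapp_name>/<test_name>/<graph_name>-summary.dat.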
LOCAL_GRAPH_DIR = '/home/%s/www/chrome_endure_clean' % getpass.getuser()


def SetupBaseGraphDirIfNeeded(webapp_name, test_name, dest_dir):
  """Sets up the directory containing results for a particular test, if needed.

  Args:
    webapp_name: The string name of the webapp associated with the given test.
    test_name: The string name of the test.
    dest_dir: The name of the destination directory that needs to be set up.
  """
  if not os.path.exists(dest_dir):
    os.mkdir(dest_dir)  # Test name directory.
    os.chmod(dest_dir, 0755)

  # Create config file.
  config_file = os.path.join(dest_dir, 'config.js')
  if not os.path.exists(config_file):
    with open(config_file, 'w') as f:
      f.write('var Config = {\n')
      f.write('buildslave: "Chrome Endure Bots",\n')
      f.write('title: "Chrome Endure %s Test: %s",\n' % (webapp_name.upper(),
                                                         test_name))
      f.write('};\n')
    os.chmod(config_file, 0755)

  # Set up symbolic links to the real graphing files.
  link_file = os.path.join(dest_dir, 'index.html')
  if not os.path.exists(link_file):
    command = 'ln -s ../../endure_plotter.html ' + link_file
    os.system(command)
    os.chmod(link_file, 0755)
  link_file = os.path.join(dest_dir, 'endure_plotter.js')
  if not os.path.exists(link_file):
    command = 'ln -s ../../endure_plotter.js ' + link_file
    os.system(command)
    os.chmod(link_file, 0755)
  link_file = os.path.join(dest_dir, 'js')
  if not os.path.exists(link_file):
    command = 'ln -s ../../js ' + link_file
    os.system(command)
    os.chmod(link_file, 0755)


def WriteToDataFile(new_line, existing_lines, revision, data_file):
  """Writes a new entry to an existing perf data file to be graphed.

  If there's an existing line with the same revision number, overwrite its data
  with the new line. Otherwise, prepend the info for the new revision.

  Args:
    new_line: A dictionary representing perf information for the new entry.
    existing_lines: A list of string lines from the existing perf data file.
    revision: The string revision number associated with the new perf entry.
    data_file: The string name of the perf data file to which to write.
  """
  overwritten = False
  for i, line in enumerate(existing_lines):
    line_dict = eval(line)

Dai Mikurube (NOT FULLTIME), 2012/09/20 04:26:08:
eval => simplejson.loads ?

dennis_jeffrey, 2012/09/25 01:21:21:
Done.

    if line_dict['rev'] == revision:
      existing_lines[i] = str(new_line)

Dai Mikurube (NOT FULLTIME), 2012/09/20 04:26:08:
str => simplejson.dumps ?

Dai Mikurube (NOT FULLTIME), 2012/09/20 06:58:48:
It looks overwriting completely, right? I remembe

dennis_jeffrey, 2012/09/25 01:21:21:
Done.

dennis_jeffrey, 2012/09/25 01:21:21:
You're right that the current script should be mer

      overwritten = True
      break
    elif int(line_dict['rev']) < int(revision):
      break
  if not overwritten:
    existing_lines.insert(0, str(new_line))

Dai Mikurube (NOT FULLTIME), 2012/09/20 04:26:08:
str => simplejson.dumps ?

dennis_jeffrey, 2012/09/25 01:21:21:
Done.


  with open(data_file, 'w') as f:
    f.write('\n'.join(existing_lines))
  os.chmod(data_file, 0755)


def OutputPerfData(revision, graph_name, description, value, units, units_x,
                   dest_dir):
  """Outputs perf data to a local text file to be graphed.

  Args:
    revision: The string revision number associated with the perf data.
    graph_name: The string name of the graph on which to plot the data.
    description: A string description of the perf value to be graphed.
    value: Either a single data value to be graphed, or a list of 2-tuples
        representing (x, y) points to be graphed for long-running tests.
    units: The string description for the y-axis units on the graph. Should
        be set to None if the results are not for long-running graphs.
    units_x: The string description for the x-axis units on the graph.
    dest_dir: The name of the destination directory to which to write.
  """
  # Update graphs.dat, which contains metadata associated with each graph.
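  # graphs.dat holds a JSON list of dictionaries of the form
  # {'name': ..., 'units': ..., 'important': ...}, kept sorted by graph name.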
  existing_graphs = []
  graphs_file = os.path.join(dest_dir, 'graphs.dat')
  if os.path.exists(graphs_file):
    with open(graphs_file, 'r') as f:
      existing_graphs = simplejson.loads(f.read())
  is_new_graph = True
  for graph in existing_graphs:
    if graph['name'] == graph_name:
      is_new_graph = False
      break
  if is_new_graph:
    new_graph = {
      'name': graph_name,
      'units': units,
      'important': False,
    }
    if units_x:
      new_graph['units_x'] = units_x
    existing_graphs.append(new_graph)
    existing_graphs = sorted(existing_graphs, key=lambda x: x['name'])
    with open(graphs_file, 'w') as f:
      f.write(simplejson.dumps(existing_graphs, indent=2))
    os.chmod(graphs_file, 0755)

  # Update summary data file, containing the actual data to be graphed.
  data_file_name = graph_name + '-summary.dat'
  existing_lines = []
  data_file = os.path.join(dest_dir, data_file_name)
  if os.path.exists(data_file):
    with open(data_file, 'r') as f:
      existing_lines = f.readlines()
    existing_lines = map(lambda x: x.strip(), existing_lines)
  if units_x:
    points = []
    for point in value:
      points.append([str(point[0]), str(point[1])])
    new_traces = {
      description: points
    }
  else:
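    # A single sample is stored as a [value, stddev] string pair; the 0.0
    # below is presumably a placeholder standard deviation.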
    new_traces = {
      description: [str(value), str(0.0)]
    }
  new_line = {
    'traces': new_traces,
    'rev': revision
  }

  WriteToDataFile(new_line, existing_lines, revision, data_file)


def OutputEventData(revision, description, event_list, dest_dir):
  """Outputs event data to a local text file to be graphed.

  Args:
    revision: The string revision number associated with the event data.
    description: A string description of the event values to be graphed.
    event_list: An array of (event_time, event_data) tuples representing the
        events to be graphed.
    dest_dir: The name of the destination directory to which to write.
  """
  data_file_name = '_EVENT_-summary.dat'
  existing_lines = []
  data_file = os.path.join(dest_dir, data_file_name)
  if os.path.exists(data_file):
    with open(data_file, 'r') as f:
      existing_lines = f.readlines()
    existing_lines = map(lambda x: x.strip(), existing_lines)

  value_list = []
  for event_time, event_data in event_list:
    value_list.append([str(event_time), event_data])
  new_events = {
    description: value_list
  }

  new_line = {
    'rev': revision,
    'events': new_events
  }

  WriteToDataFile(new_line, existing_lines, revision, data_file)


def UpdatePerfDataForSlaveAndBuild(slave_info, build_num):
  """Processes updated perf data for a particular slave and build number.

  Args:
    slave_info: A dictionary containing information about the slave to process.
    build_num: The particular build number on the slave to process.

  Returns:
    True if the perf data for the given slave/build is updated properly, or
    False if any critical error occurred.
  """
  logging.debug(' %s, build %d.', slave_info['slave_name'], build_num)
  build_url = (BUILDER_URL_BASE + urllib.quote(slave_info['slave_name']) +
               '/builds/' + str(build_num))

  url_contents = ''
  fp = None
  try:
    fp = urllib2.urlopen(build_url, timeout=60)
    url_contents = fp.read()
  except urllib2.URLError, e:
    logging.exception('Error reading build URL "%s": %s', build_url, str(e))
    return False
  finally:
    if fp:
      fp.close()

  # Extract the revision number for this build.
  revision = re.findall(
      r'<td class="left">got_revision</td>\s+<td>(\d+)</td>\s+<td>Source</td>',
      url_contents)
  if not revision:
    logging.warning('Could not get revision number. Assuming build is too new '
                    'or was cancelled.')
    return True  # Do not fail the script in this case; continue with next one.
  revision = revision[0]

  # Extract any Chrome Endure stdio links for this build.
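  # The raw links are URL-quoted step names such as
  # "/steps/endure_gmail_wpr_test%20testGmailComposeDiscard/logs/stdio",
  # following the naming convention described in the module docstring.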
  stdio_urls = []
  links = re.findall(r'(/steps/endure[^/]+/logs/stdio)', url_contents)
  for link in links:
    link_unquoted = urllib.unquote(link)
    found_wpr_result = False
    match = re.findall(r'endure_([^_]+)_test ([^/]+)/', link_unquoted)
    if not match:
      match = re.findall(r'endure_([^_]+)_wpr_test ([^/]+)/', link_unquoted)
      if match:
        found_wpr_result = True
      else:
        logging.error('Test name not in expected format in link: ' +
                      link_unquoted)
        return False
    match = match[0]
    webapp_name = match[0] + '_wpr' if found_wpr_result else match[0]
    test_name = match[1]
    stdio_urls.append({
      'link': build_url + link + '/text',
      'webapp_name': webapp_name,
      'test_name': test_name,
    })

  # For each test stdio link, parse it and look for new perf data to be graphed.
  for stdio_url_data in stdio_urls:
    stdio_url = stdio_url_data['link']
    url_contents = ''
    fp = None
    try:
      fp = urllib2.urlopen(stdio_url, timeout=60)
      # Since in-progress test output is sent chunked, there's no EOF. We need
      # to specially handle this case so we don't hang here waiting for the
      # test to complete.
      start_time = time.time()
      while True:
        data = fp.read(1024)
        if not data:
          break
        url_contents += data
        if time.time() - start_time >= 30:  # Read for at most 30 seconds.
          break
    except (urllib2.URLError, socket.error), e:
      # Issue warning but continue to the next stdio link.
      logging.warning('Error reading test stdio URL "%s": %s', stdio_url,
                      str(e))
    finally:
      if fp:
        fp.close()

    perf_data_raw = []

    def AppendRawPerfData(graph_name, description, value, units, units_x,
                          webapp_name, test_name):
      perf_data_raw.append({
        'graph_name': graph_name,
        'description': description,
        'value': value,
        'units': units,
        'units_x': units_x,
        'webapp_name': webapp_name,
        'test_name': test_name,
      })

    # First scan for short-running perf test results.
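    # These are output lines of the form:
    #   "RESULT <graph_name>: <description>= <value> <units>"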
    for match in re.findall(
        r'RESULT ([^:]+): ([^=]+)= ([-\d\.]+) (\S+)', url_contents):
      AppendRawPerfData(match[0], match[1], eval(match[2]), match[3], None,
                        stdio_url_data['webapp_name'],
                        stdio_url_data['test_name'])

    # Next scan for long-running perf test results.
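    # These are output lines of the form:
    #   "RESULT <graph_name>: <description>= [<(x, y) points>] <units> <units_x>"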
    for match in re.findall(
        r'RESULT ([^:]+): ([^=]+)= (\[[^\]]+\]) (\S+) (\S+)', url_contents):
      AppendRawPerfData(match[0], match[1], eval(match[2]), match[3], match[4],
                        stdio_url_data['webapp_name'],
                        stdio_url_data['test_name'])

    # Next scan for events in the test results.
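    # These are output lines of the form:
    #   "RESULT _EVENT_: <description>= [<event data>]"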
    for match in re.findall(
        r'RESULT _EVENT_: ([^=]+)= (\[[^\]]+\])', url_contents):
      AppendRawPerfData('_EVENT_', match[0], eval(match[1]), None, None,
                        stdio_url_data['webapp_name'],
                        stdio_url_data['test_name'])

    # For each graph_name/description pair that refers to a long-running test
    # result or an event, concatenate all the results together (assume results
    # in the input file are in the correct order). For short-running test
    # results, keep just one if more than one is specified.
    perf_data = {}  # Maps a graph-line key to a perf data dictionary.
    for data in perf_data_raw:
      key = data['graph_name'] + '|' + data['description']
      if data['graph_name'] != '_EVENT_' and not data['units_x']:
        # Short-running test result.
        perf_data[key] = data
      else:
        # Long-running test result or event.
        if key in perf_data:
          perf_data[key]['value'] += data['value']
        else:
          perf_data[key] = data

    # Finally, for each graph-line in |perf_data|, update the associated local
    # graph data files if necessary.
    for perf_data_key in perf_data:
      perf_data_dict = perf_data[perf_data_key]

      dest_dir = os.path.join(LOCAL_GRAPH_DIR, perf_data_dict['webapp_name'])
      if not os.path.exists(dest_dir):
        os.mkdir(dest_dir)  # Webapp name directory.
        os.chmod(dest_dir, 0755)
      dest_dir = os.path.join(dest_dir, perf_data_dict['test_name'])

      SetupBaseGraphDirIfNeeded(perf_data_dict['webapp_name'],
                                perf_data_dict['test_name'], dest_dir)
      if perf_data_dict['graph_name'] == '_EVENT_':
        OutputEventData(revision, perf_data_dict['description'],
                        perf_data_dict['value'], dest_dir)
      else:
        OutputPerfData(revision, perf_data_dict['graph_name'],
                       perf_data_dict['description'], perf_data_dict['value'],
                       perf_data_dict['units'], perf_data_dict['units_x'],
                       dest_dir)

  return True


def UpdatePerfDataFiles():
  """Updates the Chrome Endure graph data files with the latest test results.

  For each known Chrome Endure slave, we scan its latest test results looking
  for any new test data. Any new data that is found is then appended to the
  data files used to display the Chrome Endure graphs.

  Returns:
    True if all graph data files are updated properly, or
    False if any error occurred.
  """
  slave_list = []
  for slave_name in CHROME_ENDURE_SLAVE_NAMES:
    slave_info = {}
    slave_info['slave_name'] = slave_name
    slave_info['most_recent_build_num'] = None
    slave_info['last_processed_build_num'] = None
    slave_list.append(slave_info)

  # Identify the most recent build number for each slave.
  logging.debug('Searching for latest build numbers for each slave...')
  for slave in slave_list:
    slave_name = slave['slave_name']
    slave_url = BUILDER_URL_BASE + urllib.quote(slave_name)

    url_contents = ''
    fp = None
    try:
      fp = urllib2.urlopen(slave_url, timeout=60)
      url_contents = fp.read()
    except urllib2.URLError, e:
      logging.exception('Error reading builder URL: %s', str(e))
      return False
    finally:
      if fp:
        fp.close()

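    # A "/<build_num>/stop" form appears on the builder page for a build that
    # is presumably still in progress; its number is taken as the most recent.
    # Otherwise, fall back to the highest completed build number linked from
    # the page.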
    matches = re.findall(r'/(\d+)/stop', url_contents)
    if matches:
      slave['most_recent_build_num'] = int(matches[0])
    else:
      matches = re.findall(r'#(\d+)</a></td>', url_contents)
      if matches:
        slave['most_recent_build_num'] = sorted(map(int, matches),
                                                reverse=True)[0]
      else:
        logging.error('Could not identify latest build number for slave %s.',
                      slave_name)
        return False

    logging.debug('%s most recent build number: %s', slave_name,
                  slave['most_recent_build_num'])

  # Identify the last-processed build number for each slave.
  logging.debug('Identifying last processed build numbers...')
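  # LAST_BUILD_NUM_PROCESSED_FILE contains one "<slave_name>:<build_num>"
  # line per slave; see the write at the end of this function.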
  if not os.path.exists(LAST_BUILD_NUM_PROCESSED_FILE):
    for slave_info in slave_list:
      slave_info['last_processed_build_num'] = 0
  else:
    with open(LAST_BUILD_NUM_PROCESSED_FILE, 'r') as fp:
      file_contents = fp.read()
      for match in re.findall(r'([^:]+):(\d+)', file_contents):
        slave_name = match[0].strip()
        last_processed_build_num = match[1].strip()
        for slave_info in slave_list:
          if slave_info['slave_name'] == slave_name:
            slave_info['last_processed_build_num'] = int(
                last_processed_build_num)
    for slave_info in slave_list:
      if not slave_info['last_processed_build_num']:
        slave_info['last_processed_build_num'] = 0
  logging.debug('Done identifying last processed build numbers.')

  # For each Chrome Endure slave, process each build in-between the last
  # processed build num and the most recent build num, inclusive. To process
  # each one, first get the revision number for that build, then scan the test
  # result stdio for any performance data, and add any new performance data to
  # local files to be graphed.
  for slave_info in slave_list:
    logging.debug('Processing %s, builds %d-%d...',
                  slave_info['slave_name'],
                  slave_info['last_processed_build_num'],
                  slave_info['most_recent_build_num'])
    curr_build_num = slave_info['last_processed_build_num']
    while curr_build_num <= slave_info['most_recent_build_num']:
      if not UpdatePerfDataForSlaveAndBuild(slave_info, curr_build_num):
        return False
      curr_build_num += 1

  # Log the newly-processed build numbers.
  logging.debug('Logging the newly-processed build numbers...')
  with open(LAST_BUILD_NUM_PROCESSED_FILE, 'w') as f:
    for slave_info in slave_list:
      f.write('%s:%s\n' % (slave_info['slave_name'],
                           slave_info['most_recent_build_num']))

  return True


def GenerateIndexPage():
  """Generates a summary (landing) page for the Chrome Endure graphs."""
  logging.debug('Generating new index.html page...')

  # Page header.
  page = """
<html>

<head>
<title>Chrome Endure Overview</title>
<script language="javascript">
function DisplayGraph(name, graph) {
  document.write(
      '<td><iframe scrolling="no" height="438" width="700" src="');
  document.write(name);
  document.write('"></iframe></td>');
}
</script>
</head>

<body>
<center>

<h1>
Chrome Endure
</h1>
"""
  # Print current time.
  page += '<p>Updated: %s</p>\n' % (
      time.strftime('%A, %B %d, %Y at %I:%M:%S %p %Z'))

  # Links for each webapp.
  webapp_names = [x for x in os.listdir(LOCAL_GRAPH_DIR) if
                  x not in ['js', 'old_data'] and
                  os.path.isdir(os.path.join(LOCAL_GRAPH_DIR, x))]
  webapp_names = sorted(webapp_names)

  page += '<p> ['
  for i, name in enumerate(webapp_names):
    page += '<a href="#%s">%s</a>' % (name.upper(), name.upper())
    if i < len(webapp_names) - 1:
      page += ' | '
  page += '] </p>\n'

  # Print out the data for each webapp.
  for webapp_name in webapp_names:
    page += '\n<h1 id="%s">%s</h1>\n' % (webapp_name.upper(),
                                         webapp_name.upper())

    # Links for each test for this webapp.
    test_names = [x for x in
                  os.listdir(os.path.join(LOCAL_GRAPH_DIR, webapp_name))]
    test_names = sorted(test_names)

    page += '<p> ['
    for i, name in enumerate(test_names):
      page += '<a href="#%s">%s</a>' % (name, name)
      if i < len(test_names) - 1:
        page += ' | '
    page += '] </p>\n'

    # Print out the data for each test for this webapp.
    for test_name in test_names:
      # Get the set of graph names for this test.
      graph_names = [x[:x.find('-summary.dat')] for x in
                     os.listdir(os.path.join(LOCAL_GRAPH_DIR,
                                             webapp_name, test_name))
                     if '-summary.dat' in x and '_EVENT_' not in x]
      graph_names = sorted(graph_names)

      page += '<h2 id="%s">%s</h2>\n' % (test_name, test_name)
      page += '<table>\n'

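      # Each graph is embedded as an iframe, two per table row. The iframe
      # src is the per-test directory plus a query string, which the web
      # server presumably resolves to the index.html symlink (pointing at
      # endure_plotter.html) created by SetupBaseGraphDirIfNeeded.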
      for i, graph_name in enumerate(graph_names):
        if i % 2 == 0:
          page += ' <tr>\n'
        page += (' <script>DisplayGraph("%s/%s?graph=%s&lookout=1");'
                 '</script>\n' % (webapp_name, test_name, graph_name))
        if i % 2 == 1:
          page += ' </tr>\n'
      if len(graph_names) % 2 == 1:
        page += ' </tr>\n'
      page += '</table>\n'

  # Page footer.
  page += """
</center>
</body>

</html>
"""

  index_file = os.path.join(LOCAL_GRAPH_DIR, 'index.html')
  with open(index_file, 'w') as f:
    f.write(page)
  os.chmod(index_file, 0755)


def main():
  parser = optparse.OptionParser()
  parser.add_option(
      '-v', '--verbose', action='store_true', default=False,
      help='Use verbose logging.')
  options, _ = parser.parse_args(sys.argv)

  logging_level = logging.DEBUG if options.verbose else logging.INFO
  logging.basicConfig(level=logging_level,
                      format='[%(asctime)s] %(levelname)s: %(message)s')

  success = UpdatePerfDataFiles()
  if not success:
    logging.error('Failed to update perf data files.')
    sys.exit(1)

  GenerateIndexPage()
  logging.debug('All done!')


if __name__ == '__main__':
  main()