#! /usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Instructs Chrome to load a series of web pages and reports results.

When running, Chrome is sandwiched between preprocessed disk caches and
WebPageReplay serving all connections.

TODO(pasko): implement cache preparation and WPR.
"""

import argparse
import json
import logging
import os
import sys

_SRC_DIR = os.path.abspath(os.path.join(
    os.path.dirname(__file__), '..', '..', '..'))

sys.path.append(os.path.join(_SRC_DIR, 'third_party', 'catapult', 'devil'))
from devil.android import device_utils

sys.path.append(os.path.join(_SRC_DIR, 'build', 'android'))
import devil_chromium

import device_setup
import devtools_monitor
import page_track
import tracing


_JOB_SEARCH_PATH = 'sandwich_jobs'


def _ReadUrlsFromJobDescription(job_name):
  """Retrieves the list of URLs associated with the job name."""
  try:
    # Extra sugar: attempt to load from a relative path.
    json_file_name = os.path.join(os.path.dirname(__file__), _JOB_SEARCH_PATH,
                                  job_name)
    with open(json_file_name) as f:
      json_data = json.load(f)
  except IOError:
    # Attempt to read by regular file name.
    with open(job_name) as f:
      json_data = json.load(f)

  key = 'urls'
  if json_data and key in json_data:
    url_list = json_data[key]
    if isinstance(url_list, list) and len(url_list) > 0:
      return url_list
  raise Exception('Job description does not define a list named "urls"')

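The function above expects the job file to be a JSON object with a non-empty
"urls" list. A minimal sketch of producing such a file (the file name and URLs
are invented for illustration):

    import json

    # Hypothetical job file; the script resolves --job first under
    # sandwich_jobs/ next to the script, then as a plain path, so either
    # location works.
    with open('example_job.json', 'w') as f:
      json.dump({'urls': ['http://example.com', 'http://example.org']}, f)
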
def _SaveChromeTrace(events, directory, subdirectory):
  """Saves the trace events, ignores IO errors.

  Args:
    events: a dict as returned by TracingTrack.ToJsonDict()
    directory: directory name containing all traces
    subdirectory: directory name to create this particular trace in
  """
  target_directory = os.path.join(directory, subdirectory)
  file_name = os.path.join(target_directory, 'trace.json')
  try:
    os.makedirs(target_directory)
    with open(file_name, 'w') as f:
mattcary 2016/01/28 15:35:35
Could use gzip.GzipFile if we get concerned about file size.
pasko 2016/01/28 15:48:52
Thanks, noticed that it is done in another script.
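A minimal sketch of mattcary's suggestion (hypothetical helper, not part of
the CL; the '.gz' naming is an illustrative choice):

    import gzip
    import json

    def _SaveCompressedTrace(events, file_name):
      # Hypothetical: same payload as the json.dump below, gzip-compressed.
      # In Python 2, json.dump writes str chunks, which GzipFile.write accepts.
      with gzip.GzipFile(file_name + '.gz', 'w') as f:
        json.dump({'traceEvents': events['events'], 'metadata': {}}, f)
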
      json.dump({'traceEvents': events['events'], 'metadata': {}}, f)
  except IOError:
    logging.warning('Could not save a trace: %s' % file_name)
    # Swallow the exception.


def main():
  logging.basicConfig(level=logging.INFO)
  devil_chromium.Initialize()

  parser = argparse.ArgumentParser()
  parser.add_argument('--job', required=True,
                      help='JSON file with job description.')
  parser.add_argument('--output', required=True,
                      help='Name of output directory to create.')
  parser.add_argument('--repeat', default='1',
mattcary 2016/01/28 15:35:34
Setting type=int avoids the int(args.repeat) conversion.
pasko 2016/01/28 15:48:52
oh nice, done
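The change pasko applied presumably amounts to the following (a sketch; the
landed patch is not shown in this view):

    parser.add_argument('--repeat', type=int, default=1,
                        help='How many times to run the job')

argparse then delivers args.repeat as an int, so the int(args.repeat)
conversion in the loop below becomes unnecessary.
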
                      help='How many times to run the job')
  args = parser.parse_args()

  try:
    os.makedirs(args.output)
  except OSError:
    logging.error('Cannot create directory for results: %s' % args.output)
    raise

  job_urls = _ReadUrlsFromJobDescription(args.job)
  device = device_utils.DeviceUtils.HealthyDevices()[0]
  pages_loaded = 0
  for iteration in xrange(0, int(args.repeat)):
mattcary 2016/01/28 15:35:35
0, unnecessary for xrange.
pasko 2016/01/28 15:48:52
Done
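Together with the type=int change above, the loop header presumably became:

    for iteration in xrange(args.repeat):
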
    for url in job_urls:
      with device_setup.DeviceConnection(device) as connection:
        # The trackers register on the DevTools connection and record page
        # and trace events while the navigation runs.
        page = page_track.PageTrack(connection)
        tracing_track = tracing.TracingTrack(connection,
            categories='blink,cc,netlog,renderer.scheduler,toplevel,v8')
        connection.SetUpMonitoring()
        connection.SendAndIgnoreResponse('Page.navigate', {'url': url})
        connection.StartMonitoring()
        pages_loaded += 1
        _SaveChromeTrace(tracing_track.ToJsonDict(), args.output,
                         str(pages_loaded))


if __name__ == '__main__':
  sys.exit(main())
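
A hypothetical invocation, assuming the script is saved as sandwich.py, a job
file as sketched earlier, and one healthy Android device attached:

    python sandwich.py --job example_job.json --output /tmp/sandwich-output --repeat 3

Each page load then writes trace.json into a numbered subdirectory of
--output, one directory per load.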