Chromium Code Reviews

Unified Diff: testing/scripts/run_telemetry_benchmark_as_googletest.py

Issue 2366673002: Updating benchmark script to only write chartjson when flag present. (Closed)
Patch Set: Using argparse api | Created 4 years, 2 months ago
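
The substantive change: the --isolated-script-test-chartjson-output flag no longer uses argparse.FileType('w'). FileType opens (and truncates) the named file as soon as parse_args() runs, so the old code created a chartjson output file on every run, even when no chartjson was produced. Storing the argument as a plain path string defers the open until there are results to write, and the write is now gated on both the benchmark having emitted chartjson and the caller having supplied a destination. A minimal standalone sketch of the difference (the --out flag here is invented for illustration, not part of the patch):

import argparse

# Old pattern: argparse.FileType('w') opens, and truncates, the file at
# parse time, whether or not anything is ever written to it.
eager = argparse.ArgumentParser()
eager.add_argument('--out', type=argparse.FileType('w'))
eager_args = eager.parse_args(['--out', 'chart.json'])  # chart.json exists now

# New pattern: the argument is just a string; nothing touches the disk
# until the script decides there are results worth writing.
lazy = argparse.ArgumentParser()
lazy.add_argument('--out')
lazy_args = lazy.parse_args(['--out', 'chart.json'])
if lazy_args.out:
  with open(lazy_args.out, 'w') as f:  # opened only on demand
    f.write('{}')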
 #!/usr/bin/env python
 # Copyright 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 """Runs an isolate bundled Telemetry benchmark.
 
 This script attempts to emulate the contract of gtest-style tests
 invoked via recipes. The main contract is that the caller passes the
 argument:
(...skipping 27 matching lines...)
 # (it seems to unset DISPLAY).
 CHROME_SANDBOX_ENV = 'CHROME_DEVEL_SANDBOX'
 CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox'
 
 def main():
   parser = argparse.ArgumentParser()
   parser.add_argument(
       '--isolated-script-test-output', type=argparse.FileType('w'),
       required=True)
   parser.add_argument(
-      '--isolated-script-test-chartjson-output', type=argparse.FileType('w'),
-      required=False)
+      '--isolated-script-test-chartjson-output', required=False)
   parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
   args, rest_args = parser.parse_known_args()
   xvfb_proc = None
   openbox_proc = None
   xcompmgr_proc = None
   env = os.environ.copy()
   # Assume we want to set up the sandbox environment variables all the
   # time; doing so is harmless on non-Linux platforms and is needed
   # all the time on Linux.
   env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
   if args.xvfb and xvfb.should_start_xvfb(env):
     xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env,
                                                              build_dir='.')
     assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb'
   try:
     tempfile_dir = tempfile.mkdtemp('telemetry')
     valid = True
     failures = []
-    chartjson = (args.isolated_script_test_chartjson_output is not None and
-                 '--output-format=chartjson' in rest_args)
+    chartjson_results_present = '--output-format=chartjson' in rest_args
     chartresults = None
     try:
       rc = common.run_command([sys.executable] + rest_args + [
           '--output-dir', tempfile_dir,
           '--output-format=json'
       ], env=env)
       tempfile_name = os.path.join(tempfile_dir, 'results.json')
       with open(tempfile_name) as f:
         results = json.load(f)
       for value in results['per_page_values']:
         if value['type'] == 'failure':
           failures.append(results['pages'][str(value['page_id'])]['name'])
       valid = bool(rc == 0 or failures)
       # If we have also output chartjson read it in and return it.
       # results-chart.json is the file name output by telemetry when the
       # chartjson output format is included
-      if chartjson:
+      if chartjson_results_present:
         chart_tempfile_name = os.path.join(tempfile_dir, 'results-chart.json')
         with open(chart_tempfile_name) as f:
           chartresults = json.load(f)
     except Exception:
       traceback.print_exc()
       valid = False
     finally:
       shutil.rmtree(tempfile_dir)
 
     if not valid and not failures:
       failures = ['(entire test suite)']
       if rc == 0:
         rc = 1  # Signal an abnormal exit.
 
-    if chartjson:
-      json.dump(chartresults, args.isolated_script_test_chartjson_output)
+    if chartjson_results_present and args.isolated_script_test_chartjson_output:
+      chartjson_output_file = \
+          open(args.isolated_script_test_chartjson_output, 'w')
+      json.dump(chartresults, chartjson_output_file)
 
     json.dump({
         'valid': valid,
         'failures': failures
     }, args.isolated_script_test_output)
     return rc
 
   finally:
     xvfb.kill(xvfb_proc)
     xvfb.kill(openbox_proc)
     xvfb.kill(xcompmgr_proc)
 
 
 # This is not really a "script test" so does not need to manually add
 # any additional compile targets.
 def main_compile_targets(args):
   json.dump([], args.output)
 
 
 if __name__ == '__main__':
   # Conform minimally to the protocol defined by ScriptTest.
   if 'compile_targets' in sys.argv:
     funcs = {
       'run': None,
       'compile_targets': main_compile_targets,
     }
     sys.exit(common.run_script(sys.argv[1:], funcs))
   sys.exit(main())
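
For reference, here is a hypothetical, minimal results.json shaped the way the failure-collection loop in main() expects it; the key names are taken from the script above, not from Telemetry documentation:

# Hypothetical payload; real Telemetry output carries many more fields.
results = {
  'pages': {'7': {'name': 'http://example.com/story'}},
  'per_page_values': [
    {'type': 'failure', 'page_id': 7},
    {'type': 'scalar', 'page_id': 7, 'value': 42},
  ],
}

failures = []
for value in results['per_page_values']:
  if value['type'] == 'failure':
    # per_page_values carries a numeric page_id, while 'pages' is keyed by
    # string, hence the str() conversion in the script.
    failures.append(results['pages'][str(value['page_id'])]['name'])

assert failures == ['http://example.com/story']

The gtest-style summary handed back through --isolated-script-test-output then reduces to {'valid': valid, 'failures': failures}, which is the contract the recipes expect.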
