Chromium Code Reviews

Diff: testing/scripts/run_telemetry_benchmark_as_googletest.py

Issue 2385183002: Ignoring all results but chartjson on disabled benchmark run. (Closed)
Patch Set: Created 4 years, 2 months ago
 #!/usr/bin/env python
 # Copyright 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Runs an isolate bundled Telemetry benchmark.

 This script attempts to emulate the contract of gtest-style tests
 invoked via recipes. The main contract is that the caller passes the
 argument:

(...skipping 53 matching lines...)

   tempfile_dir = tempfile.mkdtemp('telemetry')
   valid = True
   failures = []
   chartjson_results_present = '--output-format=chartjson' in rest_args
   chartresults = None
   try:
     rc = common.run_command([sys.executable] + rest_args + [
         '--output-dir', tempfile_dir,
         '--output-format=json'
     ], env=env)
-    tempfile_name = os.path.join(tempfile_dir, 'results.json')
-    with open(tempfile_name) as f:
-      results = json.load(f)
-    for value in results['per_page_values']:
-      if value['type'] == 'failure':
-        failures.append(results['pages'][str(value['page_id'])]['name'])
-    valid = bool(rc == 0 or failures)
     # If we have also output chartjson read it in and return it.

        [Ken Russell, 2016/10/03 21:20:49] Strange indent?

     # results-chart.json is the file name output by telemetry when the
     # chartjson output format is included
     if chartjson_results_present:
       chart_tempfile_name = os.path.join(tempfile_dir, 'results-chart.json')
       with open(chart_tempfile_name) as f:
         chartresults = json.load(f)
+    # We need to get chartjson results first as this may be a disabled
+    # benchmark that was run
+    if (not chartjson_results_present or
+        (chartjson_results_present and chartresults.get('enabled', True))):
+      tempfile_name = os.path.join(tempfile_dir, 'results.json')
+      with open(tempfile_name) as f:
+        results = json.load(f)
+      for value in results['per_page_values']:
+        if value['type'] == 'failure':
+          failures.append(results['pages'][str(value['page_id'])]['name'])
+      valid = bool(rc == 0 or failures)
+
   except Exception:
     traceback.print_exc()
     valid = False
   finally:
     shutil.rmtree(tempfile_dir)

   if not valid and not failures:
     failures = ['(entire test suite)']
     if rc == 0:
       rc = 1  # Signal an abnormal exit.

(...skipping 23 matching lines...)

 if __name__ == '__main__':
   # Conform minimally to the protocol defined by ScriptTest.
   if 'compile_targets' in sys.argv:
     funcs = {
       'run': None,
       'compile_targets': main_compile_targets,
     }
     sys.exit(common.run_script(sys.argv[1:], funcs))
   sys.exit(main())
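
For readers skimming the CL, here is a minimal standalone sketch (not part of the patch) of the decision it introduces. It relies only on what the new code reads: a top-level 'enabled' key in results-chart.json. The helper name should_read_json_results and the example dictionaries below are made up for illustration.

def should_read_json_results(chartjson_results_present, chartresults):
  """Mirrors the patch's gate on parsing results.json for per-page failures.

  Logically equivalent to the patch's condition
      not chartjson_results_present or
      (chartjson_results_present and chartresults.get('enabled', True))
  because the second chartjson_results_present is always True when it is
  evaluated.
  """
  return not chartjson_results_present or chartresults.get('enabled', True)


if __name__ == '__main__':
  # Disabled benchmark: chartjson was produced but marked disabled, so the
  # per-page results are ignored and only the chartjson is reported.
  print(should_read_json_results(True, {'enabled': False}))   # False
  # Normal run with chartjson output: results.json is still parsed.
  print(should_read_json_results(True, {'enabled': True}))    # True
  # chartjson output not requested: results.json is always parsed.
  print(should_read_json_results(False, None))                # True

When the benchmark is disabled, failures stays empty and valid keeps its initial True, so the '(entire test suite)' fallback failure is never recorded and the exit code is left as run_command returned it.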
