OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright 2015 The Chromium Authors. All rights reserved. | 2 # Copyright 2015 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Runs an isolate bundled Telemetry benchmark. | 6 """Runs an isolate bundled Telemetry benchmark. |
7 | 7 |
8 This script attempts to emulate the contract of gtest-style tests | 8 This script attempts to emulate the contract of gtest-style tests |
9 invoked via recipes. The main contract is that the caller passes the | 9 invoked via recipes. The main contract is that the caller passes the |
10 argument: | 10 argument: |
(...skipping 48 matching lines...)
59 if args.xvfb and xvfb.should_start_xvfb(env): | 59 if args.xvfb and xvfb.should_start_xvfb(env): |
60 xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env, | 60 xvfb_proc, openbox_proc, xcompmgr_proc = xvfb.start_xvfb(env=env, |
61 build_dir='.') | 61 build_dir='.') |
62 assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb' | 62 assert xvfb_proc and openbox_proc and xcompmgr_proc, 'Failed to start xvfb' |
63 try: | 63 try: |
64 tempfile_dir = tempfile.mkdtemp('telemetry') | 64 tempfile_dir = tempfile.mkdtemp('telemetry') |
65 valid = True | 65 valid = True |
66 failures = [] | 66 failures = [] |
67 chartjson_results_present = '--output-format=chartjson' in rest_args | 67 chartjson_results_present = '--output-format=chartjson' in rest_args |
68 chartresults = None | 68 chartresults = None |
| 69 |
| 70 results = None |
69 try: | 71 try: |
70 rc = common.run_command([sys.executable] + rest_args + [ | 72 rc = common.run_command([sys.executable] + rest_args + [ |
71 '--output-dir', tempfile_dir, | 73 '--output-dir', tempfile_dir, |
72 '--output-format=json' | 74 '--output-format=json' |
73 ], env=env) | 75 ], env=env) |
74 # If we have also output chartjson read it in and return it. | 76 # If we have also output chartjson read it in and return it. |
75 # results-chart.json is the file name output by telemetry when the | 77 # results-chart.json is the file name output by telemetry when the |
76 # chartjson output format is included | 78 # chartjson output format is included |
77 if chartjson_results_present: | 79 if chartjson_results_present: |
78 chart_tempfile_name = os.path.join(tempfile_dir, 'results-chart.json') | 80 chart_tempfile_name = os.path.join(tempfile_dir, 'results-chart.json') |
79 with open(chart_tempfile_name) as f: | 81 with open(chart_tempfile_name) as f: |
80 chartresults = json.load(f) | 82 chartresults = json.load(f) |
81 # We need to get chartjson results first as this may be a disabled | 83 # We need to get chartjson results first as this may be a disabled |
82 # benchmark that was run | 84 # benchmark that was run |
83 if (not chartjson_results_present or | 85 if (not chartjson_results_present or |
84 (chartjson_results_present and chartresults.get('enabled', True))): | 86 (chartjson_results_present and chartresults.get('enabled', True))): |
85 tempfile_name = os.path.join(tempfile_dir, 'results.json') | 87 tempfile_name = os.path.join(tempfile_dir, 'results.json') |
86 with open(tempfile_name) as f: | 88 with open(tempfile_name) as f: |
87 results = json.load(f) | 89 results = json.load(f) |
88 for value in results['per_page_values']: | 90 for value in results['per_page_values']: |
89 if value['type'] == 'failure': | 91 if value['type'] == 'failure': |
90 failures.append(results['pages'][str(value['page_id'])]['name']) | 92 page_data = results['pages'][str(value['page_id'])] |
| 93 name = page_data.get('name') |
| 94 if not name: |
| 95 name = page_data['url'] |
| 96 |
| 97 failures.append(name) |
91 valid = bool(rc == 0 or failures) | 98 valid = bool(rc == 0 or failures) |
92 | 99 |
93 except Exception: | 100 except Exception: |
94 traceback.print_exc() | 101 traceback.print_exc() |
| 102 if results: |
| 103 print 'results, which possibly caused exception: %s' % json.dumps( |
| 104 results, indent=2) |
95 valid = False | 105 valid = False |
96 finally: | 106 finally: |
97 shutil.rmtree(tempfile_dir) | 107 shutil.rmtree(tempfile_dir) |
98 | 108 |
99 if not valid and not failures: | 109 if not valid and not failures: |
100 failures = ['(entire test suite)'] | 110 failures = ['(entire test suite)'] |
101 if rc == 0: | 111 if rc == 0: |
102 rc = 1 # Signal an abnormal exit. | 112 rc = 1 # Signal an abnormal exit. |
103 | 113 |
104 if chartjson_results_present and args.isolated_script_test_chartjson_output: | 114 if chartjson_results_present and args.isolated_script_test_chartjson_output: |
(...skipping 21 matching lines...)
126 | 136 |
127 if __name__ == '__main__': | 137 if __name__ == '__main__': |
128 # Conform minimally to the protocol defined by ScriptTest. | 138 # Conform minimally to the protocol defined by ScriptTest. |
129 if 'compile_targets' in sys.argv: | 139 if 'compile_targets' in sys.argv: |
130 funcs = { | 140 funcs = { |
131 'run': None, | 141 'run': None, |
132 'compile_targets': main_compile_targets, | 142 'compile_targets': main_compile_targets, |
133 } | 143 } |
134 sys.exit(common.run_script(sys.argv[1:], funcs)) | 144 sys.exit(common.run_script(sys.argv[1:], funcs)) |
135 sys.exit(main()) | 145 sys.exit(main()) |
OLD | NEW |