Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright 2014 The Chromium Authors. All rights reserved. | 1 # Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 import datetime | 5 import datetime |
| 6 import json | |
| 6 import re | 7 import re |
| 7 import string | 8 import string |
| 8 | 9 |
| 9 | 10 |
| 10 class Test(object): | 11 class Test(object): |
| 11 """ | 12 """ |
| 12 Base class for tests that can be retried after deapplying a previously | 13 Base class for tests that can be retried after deapplying a previously |
| 13 applied patch. | 14 applied patch. |
| 14 """ | 15 """ |
| 15 | 16 |
| (...skipping 1065 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1081 | 1082 |
  def validate_task_results(self, api, step_result):
    """Extracts (valid, failures) from an isolated-script step's results.

    Reads the 'isolated_script_results' JSON attached to the step result.
    A non-zero retcode with no recorded individual failures is treated as
    the entire suite failing (e.g. harness crash) and marked invalid.

    Args:
      api: recipe API object (used for step-text formatting and, indirectly,
          for the chartjson upload step).
      step_result: the finished step whose output is being validated.

    Returns:
      (valid, failures): bool validity flag and a list of failure names
      (None when the results JSON was malformed).
    """
    results = getattr(step_result, 'isolated_script_results', None) or {}

    try:
      failures = results['failures']
      valid = results['valid']
      # No per-test failures but a failing exit code: report one synthetic
      # whole-suite failure and treat the results as invalid.
      if not failures and step_result.retcode != 0:
        failures = ['%s (entire test suite)' % self.name]
        valid = False

    except (ValueError, KeyError) as e:
      # Results JSON missing expected keys: surface the exception in a step
      # log rather than crashing, and flag the run as invalid.
      step_result.presentation.logs['invalid_results_exc'] = [str(e)]
      valid = False
      failures = None
    if valid:
      step_result.presentation.step_text += api.test_utils.format_step_text([
          ['failures:', failures]
      ])
    # Check for chartjson results and upload to results dashboard if present.
    # NOTE(review): the diff rendering is ambiguous about whether this call
    # sits inside the `if valid:` block or at method level -- placed at method
    # level here (chartjson presence is independent of failure validity);
    # confirm against the committed revision.
    self._output_chartjson_results_if_present(api, step_result)
    return valid, failures
| 1105 def _output_chartjson_results_if_present(self, api, step_result): | |
| 1106 results = \ | |
| 1107 getattr(step_result, 'isolated_script_chartjson_results', None) or {} | |
| 1108 try: | |
| 1109 if not 'charts' in results: | |
| 1110 print 'Info: No chart json present' | |
|
tandrii(chromium)
2016/10/11 12:21:43
This is now printed while simulation_test is run,
| |
| 1111 return | |
| 1112 | |
| 1113 if not results.get('enabled', True): | |
| 1114 print 'Info: Benchmark disabled, not sending results to dashboard' | |
| 1115 return | |
| 1116 | |
| 1117 """Produces a step that uploads results to dashboard""" | |
| 1118 args = [ | |
| 1119 '--results', json.dumps(results), | |
| 1120 '--perf-id', api.properties['perf-id'], | |
| 1121 '--results-url', api.properties['results-url'], | |
| 1122 '--build-dir', api.chromium.c.build_dir, | |
| 1123 '--got-revision-cp', api.properties['got_revision_cp'], | |
| 1124 '--version', api.properties['version'], | |
| 1125 '--git-revision', api.properties['git_revision'], | |
| 1126 '--buildername', api.properties['buildername'], | |
| 1127 '--buildnumber', api.properties['buildnumber'], | |
| 1128 '--got-webrtc-revision', api.properties['got_webrtc_revision'], | |
| 1129 '--got-v8-revision', api.properties['got_v8_revision'], | |
| 1130 ] | |
| 1131 | |
| 1132 api.python( | |
| 1133 'Upload Perf Dashboard Results', | |
| 1134 api.chromium.package_repo_resource( | |
| 1135 'scripts', 'slave', 'upload_perf_dashboard_results.py'), | |
| 1136 args) | |
| 1137 | |
| 1138 except (ValueError, KeyError) as e: | |
| 1139 print 'Error: Unable to upload chartjson results to perf dashboard' | |
| 1140 | |
| 1101 | 1141 |
| 1102 def generate_isolated_script(api, chromium_tests_api, mastername, buildername, | 1142 def generate_isolated_script(api, chromium_tests_api, mastername, buildername, |
| 1103 test_spec, bot_update_step, enable_swarming=False, | 1143 test_spec, bot_update_step, enable_swarming=False, |
| 1104 swarming_dimensions=None, | 1144 swarming_dimensions=None, |
| 1105 scripts_compile_targets=None): | 1145 scripts_compile_targets=None): |
| 1106 for spec in test_spec.get(buildername, {}).get('isolated_scripts', []): | 1146 for spec in test_spec.get(buildername, {}).get('isolated_scripts', []): |
| 1107 use_swarming = False | 1147 use_swarming = False |
| 1108 swarming_shards = 1 | 1148 swarming_shards = 1 |
| 1109 swarming_dimension_sets = None | 1149 swarming_dimension_sets = None |
| 1110 swarming_priority = None | 1150 swarming_priority = None |
| (...skipping 640 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1751 args=args) | 1791 args=args) |
| 1752 api.gsutil.upload( | 1792 api.gsutil.upload( |
| 1753 temp_output_dir.join( | 1793 temp_output_dir.join( |
| 1754 '%s-android-chrome.json' % timestamp_string), | 1794 '%s-android-chrome.json' % timestamp_string), |
| 1755 'chromium-annotated-tests', 'android') | 1795 'chromium-annotated-tests', 'android') |
| 1756 | 1796 |
# gtest suites referenced by name GOMA_TESTS.
# NOTE(review): the name suggests these are the suites built/run to validate
# Goma-compiled output -- confirm against the users of this constant, which
# are outside this view.
GOMA_TESTS = [
  GTestTest('base_unittests'),
  GTestTest('content_unittests'),
]
| OLD | NEW |