OLD | NEW |
1 # Copyright 2014 The Chromium Authors. All rights reserved. | 1 # Copyright 2014 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 import datetime | 5 import datetime |
6 import json | |
7 import re | 6 import re |
8 import string | 7 import string |
9 | 8 |
10 | 9 |
11 class Test(object): | 10 class Test(object): |
12 """ | 11 """ |
13 Base class for tests that can be retried after deapplying a previously | 12 Base class for tests that can be retried after deapplying a previously |
14 applied patch. | 13 applied patch. |
15 """ | 14 """ |
16 | 15 |
(...skipping 1065 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1082 | 1081 |
def validate_task_results(self, api, step_result):
  """Extracts pass/fail information from an isolated script's results.

  Args:
    api: recipe API object; only api.test_utils.format_step_text is used.
    step_result: the finished step whose isolated_script_results (and
        retcode) are inspected.

  Returns:
    A (valid, failures) pair: valid is a bool, failures is the list of
    failed test names (or None when the results were malformed).
  """
  results = getattr(step_result, 'isolated_script_results', None) or {}

  try:
    failures, valid = results['failures'], results['valid']
  except (ValueError, KeyError) as e:
    # Malformed results: surface the parse error on the step's log page
    # and report the run as invalid.
    step_result.presentation.logs['invalid_results_exc'] = [str(e)]
    return False, None

  # A failing return code with no individual failures means the whole
  # suite fell over before reporting anything; mark it all as failed.
  if not failures and step_result.retcode != 0:
    failures = ['%s (entire test suite)' % self.name]
    valid = False

  if valid:
    step_result.presentation.step_text += api.test_utils.format_step_text(
        [['failures:', failures]])
  return valid, failures
1104 | 1100 |
1105 def _output_chartjson_results_if_present(self, api, step_result): | |
1106 results = \ | |
1107 getattr(step_result, 'isolated_script_chartjson_results', None) or {} | |
1108 try: | |
1109 if not 'charts' in results: | |
1110 print 'Info: No chart json present' | |
1111 return | |
1112 | |
1113 if not results.get('enabled', True): | |
1114 print 'Info: Benchmark disabled, not sending results to dashboard' | |
1115 return | |
1116 | |
1117 """Produces a step that uploads results to dashboard""" | |
1118 args = [ | |
1119 '--results', json.dumps(results), | |
1120 '--perf-id', api.properties['perf-id'], | |
1121 '--results-url', api.properties['results-url'], | |
1122 '--build-dir', api.chromium.c.build_dir, | |
1123 '--got-revision-cp', api.properties['got_revision_cp'], | |
1124 '--version', api.properties['version'], | |
1125 '--git-revision', api.properties['git_revision'], | |
1126 '--buildername', api.properties['buildername'], | |
1127 '--buildnumber', api.properties['buildnumber'], | |
1128 '--got-webrtc-revision', api.properties['got_webrtc_revision'], | |
1129 '--got-v8-revision', api.properties['got_v8_revision'], | |
1130 ] | |
1131 | |
1132 api.python( | |
1133 'Upload Perf Dashboard Results', | |
1134 api.chromium.package_repo_resource( | |
1135 'scripts', 'slave', 'upload_perf_dashboard_results.py'), | |
1136 args) | |
1137 | |
1138 except (ValueError, KeyError) as e: | |
1139 print 'Error: Unable to upload chartjson results to perf dashboard' | |
1140 | |
1141 | 1101 |
1142 def generate_isolated_script(api, chromium_tests_api, mastername, buildername, | 1102 def generate_isolated_script(api, chromium_tests_api, mastername, buildername, |
1143 test_spec, bot_update_step, enable_swarming=False, | 1103 test_spec, bot_update_step, enable_swarming=False, |
1144 swarming_dimensions=None, | 1104 swarming_dimensions=None, |
1145 scripts_compile_targets=None): | 1105 scripts_compile_targets=None): |
1146 for spec in test_spec.get(buildername, {}).get('isolated_scripts', []): | 1106 for spec in test_spec.get(buildername, {}).get('isolated_scripts', []): |
1147 use_swarming = False | 1107 use_swarming = False |
1148 swarming_shards = 1 | 1108 swarming_shards = 1 |
1149 swarming_dimension_sets = None | 1109 swarming_dimension_sets = None |
1150 swarming_priority = None | 1110 swarming_priority = None |
(...skipping 640 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1791 args=args) | 1751 args=args) |
1792 api.gsutil.upload( | 1752 api.gsutil.upload( |
1793 temp_output_dir.join( | 1753 temp_output_dir.join( |
1794 '%s-android-chrome.json' % timestamp_string), | 1754 '%s-android-chrome.json' % timestamp_string), |
1795 'chromium-annotated-tests', 'android') | 1755 'chromium-annotated-tests', 'android') |
1796 | 1756 |
# Gtest suites run to sanity-check goma-compiled binaries. GTestTest is
# defined earlier in this file; presumably the goma canary/staging builders
# consume this list — confirm against callers.
GOMA_TESTS = [
    GTestTest('base_unittests'),
    GTestTest('content_unittests'),
]
OLD | NEW |