Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(281)

Side by Side Diff: scripts/slave/recipe_modules/chromium_tests/steps.py

Issue 2410613002: Change SwarmingIsolatedScriptTest to upload json format results (Closed)
Patch Set: Rebase Created 4 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | scripts/slave/recipes/chromium.expected/dynamic_swarmed_sharded_failed_isolated_script_test.json » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 # Copyright 2014 The Chromium Authors. All rights reserved. 1 # Copyright 2014 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 4
5 import datetime 5 import datetime
6 import json 6 import json
7 import re 7 import re
8 import string 8 import string
9 9
10 10
(...skipping 1031 matching lines...) Expand 10 before | Expand all | Expand 10 after
1042 self.failures(api, suffix) 1042 self.failures(api, suffix)
1043 1043
1044 return self._test_runs[suffix].json.output['valid'] 1044 return self._test_runs[suffix].json.output['valid']
1045 except Exception: # pragma: no cover 1045 except Exception: # pragma: no cover
1046 return False 1046 return False
1047 1047
1048 def failures(self, api, suffix): 1048 def failures(self, api, suffix):
1049 return self._test_runs[suffix].json.output['failures'] 1049 return self._test_runs[suffix].json.output['failures']
1050 1050
1051 1051
def is_json_results_format(results):
  """Returns True if |results| is in version 3 of the JSON test results format.

  Any results dict without an integer 'version' key equal to 3 (including an
  empty dict) is treated as the simplified results format.
  """
  version = results.get('version', 0)
  return version == 3
1054
1055
1052 class SwarmingIsolatedScriptTest(SwarmingTest): 1056 class SwarmingIsolatedScriptTest(SwarmingTest):
1053 def __init__(self, name, args=None, target_name=None, shards=1, 1057 def __init__(self, name, args=None, target_name=None, shards=1,
1054 dimensions=None, tags=None, extra_suffix=None, priority=None, 1058 dimensions=None, tags=None, extra_suffix=None, priority=None,
1055 expiration=None, hard_timeout=None, upload_test_results=True, 1059 expiration=None, hard_timeout=None, upload_test_results=True,
1056 override_compile_targets=None, perf_id=None, 1060 override_compile_targets=None, perf_id=None,
1057 results_url=None, perf_dashboard_id=None): 1061 results_url=None, perf_dashboard_id=None):
1058 super(SwarmingIsolatedScriptTest, self).__init__( 1062 super(SwarmingIsolatedScriptTest, self).__init__(
1059 name, dimensions, tags, target_name, extra_suffix, priority, expiration, hard_timeout) 1063 name, dimensions, tags, target_name, extra_suffix, priority, expiration, hard_timeout)
1060 self._args = args or [] 1064 self._args = args or []
1061 self._shards = shards 1065 self._shards = shards
1062 self._upload_test_results = upload_test_results 1066 self._upload_test_results = upload_test_results
1063 self._override_compile_targets = override_compile_targets 1067 self._override_compile_targets = override_compile_targets
1064 self._perf_id=perf_id 1068 self._perf_id=perf_id
1065 self._results_url = results_url 1069 self._results_url = results_url
1066 self._perf_dashboard_id = perf_dashboard_id 1070 self._perf_dashboard_id = perf_dashboard_id
1071 self._isolated_script_results = {}
1067 1072
1068 @property 1073 @property
1069 def target_name(self): 1074 def target_name(self):
1070 return self._target_name or self._name 1075 return self._target_name or self._name
1071 1076
1072 def compile_targets(self, _): 1077 def compile_targets(self, _):
1073 if self._override_compile_targets: 1078 if self._override_compile_targets:
1074 return self._override_compile_targets 1079 return self._override_compile_targets
1075 return [self.target_name] 1080 return [self.target_name]
1076 1081
(...skipping 19 matching lines...) Expand all
1096 1101
1097 def validate_json_test_results(self, api, results): 1102 def validate_json_test_results(self, api, results):
1098 test_results = api.test_utils.create_results_from_json(results) 1103 test_results = api.test_utils.create_results_from_json(results)
1099 tests = test_results.tests 1104 tests = test_results.tests
1100 failures = list( 1105 failures = list(
1101 t for t in tests 1106 t for t in tests
1102 if all(res not in tests[t]['expected'].split() 1107 if all(res not in tests[t]['expected'].split()
1103 for res in tests[t]['actual'].split())) 1108 for res in tests[t]['actual'].split()))
1104 return True, failures 1109 return True, failures
1105 1110
1111 def upload_json_format_results(self, api, results):
1112 chrome_revision_cp = api.bot_update.last_returned_properties.get(
1113 'got_revision_cp', 'x@{#0}')
1114 chrome_revision = str(api.commit_position.parse_revision(
1115 chrome_revision_cp))
1116 api.test_results.upload(
1117 api.json.input(results), chrome_revision=chrome_revision,
1118 test_type=self.name,
1119 test_results_server='test-results.appspot.com')
1120
1106 def validate_task_results(self, api, step_result): 1121 def validate_task_results(self, api, step_result):
1107 results = getattr(step_result, 'isolated_script_results', None) or {} 1122 results = getattr(step_result, 'isolated_script_results', None) or {}
1108 valid = True 1123 valid = True
1109 failures = [] 1124 failures = []
1110 try: 1125 try:
1111 if results.get('version', 0) == 3: 1126 if is_json_results_format(results):
1112 valid, failures = self.validate_json_test_results(api, results) 1127 valid, failures = self.validate_json_test_results(api, results)
1113 else: 1128 else:
1114 valid, failures = self.validate_simplified_results(results) 1129 valid, failures = self.validate_simplified_results(results)
1115 except (ValueError, KeyError) as e: 1130 except (ValueError, KeyError) as e:
1116 step_result.presentation.logs['invalid_results_exc'] = [repr(e)] 1131 step_result.presentation.logs['invalid_results_exc'] = [repr(e)]
1117 valid = False 1132 valid = False
1118 failures = None 1133 failures = None
1119 if not failures and step_result.retcode != 0: 1134 if not failures and step_result.retcode != 0:
1120 failures = ['%s (entire test suite)' % self.name] 1135 failures = ['%s (entire test suite)' % self.name]
1121 valid = False 1136 valid = False
1122 if valid: 1137 if valid:
1138 self._isolated_script_results = results
1123 step_result.presentation.step_text += api.test_utils.format_step_text([ 1139 step_result.presentation.step_text += api.test_utils.format_step_text([
1124 ['failures:', failures] 1140 ['failures:', failures]
1125 ]) 1141 ])
1126 # Check for chartjson results and upload to results dashboard if present. 1142 # Check for chartjson results and upload to results dashboard if present.
1127 self._output_chartjson_results_if_present(api, step_result) 1143 self._output_chartjson_results_if_present(api, step_result)
1128 return valid, failures 1144 return valid, failures
1129 1145
1146 def post_run(self, api, suffix, test_filter=None):
1147 try:
1148 super(SwarmingIsolatedScriptTest, self).post_run(
1149 api, suffix, test_filter=test_filter)
1150 finally:
1151 results = self._isolated_script_results
1152 if self._upload_test_results and is_json_results_format(results):
1153 self.upload_json_format_results(api, results)
1154
1130 def _output_chartjson_results_if_present(self, api, step_result): 1155 def _output_chartjson_results_if_present(self, api, step_result):
1131 results = \ 1156 results = \
1132 getattr(step_result, 'isolated_script_chartjson_results', None) or {} 1157 getattr(step_result, 'isolated_script_chartjson_results', None) or {}
1133 try: 1158 try:
1134 if not results.get('enabled', True): 1159 if not results.get('enabled', True):
1135 step_result.presentation.logs['DISABLED_BENCHMARK'] = \ 1160 step_result.presentation.logs['DISABLED_BENCHMARK'] = \
1136 ['Info: Benchmark disabled, not sending results to dashboard'] 1161 ['Info: Benchmark disabled, not sending results to dashboard']
1137 return 1162 return
1138 1163
1139 # TODO(eyaich): Remove logging once we debug uploading chartjson 1164 # TODO(eyaich): Remove logging once we debug uploading chartjson
(...skipping 742 matching lines...) Expand 10 before | Expand all | Expand 10 after
1882 args=args) 1907 args=args)
1883 api.gsutil.upload( 1908 api.gsutil.upload(
1884 temp_output_dir.join( 1909 temp_output_dir.join(
1885 '%s-android-chrome.json' % timestamp_string), 1910 '%s-android-chrome.json' % timestamp_string),
1886 'chromium-annotated-tests', 'android') 1911 'chromium-annotated-tests', 'android')
1887 1912
1888 GOMA_TESTS = [ 1913 GOMA_TESTS = [
1889 GTestTest('base_unittests'), 1914 GTestTest('base_unittests'),
1890 GTestTest('content_unittests'), 1915 GTestTest('content_unittests'),
1891 ] 1916 ]
OLDNEW
« no previous file with comments | « no previous file | scripts/slave/recipes/chromium.expected/dynamic_swarmed_sharded_failed_isolated_script_test.json » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698