OLD | NEW |
---|---|
1 # Copyright 2014 The Chromium Authors. All rights reserved. | 1 # Copyright 2014 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 import datetime | 5 import datetime |
6 import re | 6 import re |
7 import string | 7 import string |
8 | 8 |
| 9 from slave import slave_utils |
| 10 from slave import results_dashboard |
9 | 11 |
10 class Test(object): | 12 class Test(object): |
11 """ | 13 """ |
12 Base class for tests that can be retried after deapplying a previously | 14 Base class for tests that can be retried after deapplying a previously |
13 applied patch. | 15 applied patch. |
14 """ | 16 """ |
15 | 17 |
16 def __init__(self): | 18 def __init__(self): |
17 super(Test, self).__init__() | 19 super(Test, self).__init__() |
18 self._test_runs = {} | 20 self._test_runs = {} |
(...skipping 1062 matching lines...)
1081 | 1083 |
1082 def validate_task_results(self, api, step_result): | 1084 def validate_task_results(self, api, step_result): |
1083 results = getattr(step_result, 'isolated_script_results', None) or {} | 1085 results = getattr(step_result, 'isolated_script_results', None) or {} |
1084 | 1086 |
1085 try: | 1087 try: |
1086 failures = results['failures'] | 1088 failures = results['failures'] |
1087 valid = results['valid'] | 1089 valid = results['valid'] |
1088 if not failures and step_result.retcode != 0: | 1090 if not failures and step_result.retcode != 0: |
1089 failures = ['%s (entire test suite)' % self.name] | 1091 failures = ['%s (entire test suite)' % self.name] |
1090 valid = False | 1092 valid = False |
| 1093 # Check for chartjson results and upload to results dashboard if present. |
| 1094 self._output_chartjson_results_if_present(api, step_result) |
1091 except (ValueError, KeyError) as e: | 1095 except (ValueError, KeyError) as e: |
1092 step_result.presentation.logs['invalid_results_exc'] = [str(e)] | 1096 step_result.presentation.logs['invalid_results_exc'] = [str(e)] |
1093 valid = False | 1097 valid = False |
1094 failures = None | 1098 failures = None |
1095 if valid: | 1099 if valid: |
1096 step_result.presentation.step_text += api.test_utils.format_step_text([ | 1100 step_result.presentation.step_text += api.test_utils.format_step_text([ |
1097 ['failures:', failures] | 1101 ['failures:', failures] |
1098 ]) | 1102 ]) |
1099 return valid, failures | 1103 return valid, failures |
1100 | 1104 |
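Editor's note: validate_task_results above reads only two keys out of the merged isolated_script_results object, 'valid' and 'failures'. A minimal sketch of that contract follows; the payload and the summarize helper are hypothetical illustrations, and only the two keys plus the whole-suite fallback are grounded in the code above.

# Hypothetical payload shaped like step_result.isolated_script_results;
# only 'valid' and 'failures' are consumed by validate_task_results.
results = {
    'valid': True,
    'failures': ['TestSuite.TestCase'],
}

def summarize(results, retcode, suite_name):
    # Mirrors the logic above: a non-zero return code with no recorded
    # failures is reported as a failure of the entire suite.
    failures = results['failures']
    valid = results['valid']
    if not failures and retcode != 0:
        failures = ['%s (entire test suite)' % suite_name]
        valid = False
    return valid, failures

print(summarize(results, 0, 'base_unittests'))
# -> (True, ['TestSuite.TestCase'])
print(summarize({'valid': True, 'failures': []}, 1, 'base_unittests'))
# -> (False, ['base_unittests (entire test suite)'])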
| 1105 def _output_chartjson_results_if_present(self, api, step_result): |
| 1106 results = \ |
| 1107 getattr(step_result, 'isolated_script_chartjson_results', None) or {} |
| 1108 try: |
| 1109 if 'charts' not in results: |
| 1110 print 'Info: No chart json present' |
| 1111 return |
| 1112 |
| 1113 if not results.get('enabled', True): |
| 1114 print 'Info: Benchmark disabled, not sending results to dashboard' |
| 1115 return |
| 1116 |
| 1117 main_revision = \ |
| 1118 slave_utils.GetMainRevision(api.properties, api.chromium.c.build_dir) |
| 1119 blink_revision = slave_utils.GetBlinkRevision(api.chromium.c.build_dir) |
| 1120 revisions = slave_utils.GetPerfDashboardRevisions( |
| 1121 api.properties, main_revision, blink_revision) |
| 1122 reference_build = 'reference' in self.name |
| 1123 stripped_test_name = self.name.replace('.reference', '') |
| 1124 dashboard_json = results_dashboard.MakeDashboardJsonV1( |
| 1125 results, |
| 1126 revisions, stripped_test_name, api.properties['perf-id'], |
| 1127 api.properties['buildername'], api.properties['buildnumber'], |
| 1128 None, reference_build) |
| 1129 if dashboard_json: # pragma: no cover |
Ken Russell 2016/09/30 21:45:32
Could you add a TODO or comment related to the add…
eyaich1 2016/10/03 13:32:56
Done. It was just a small bug in the infrastructure.
| 1130 logging.debug(json.dumps(dashboard_json, indent=2)) |
| 1131 results_dashboard.SendResults( |
| 1132 dashboard_json, |
| 1133 api.properties['results-url'], |
| 1134 api.chromium.c.build_dir) |
| 1135 else: # pragma: no cover |
| 1136 print 'Error: No perf dashboard JSON was produced.' |
| 1137 print '@@@STEP_FAILURE@@@' |
| 1138 |
| 1139 except (ValueError, KeyError) as e: |
| 1140 print 'Error: Unable to upload chartjson results to perf dashboard: %s' % e |
| 1141 |
1101 | 1142 |
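Editor's note: the upload path added above gates on just two fields of the chartjson payload, the presence of a 'charts' dict and an optional top-level 'enabled' flag (and its logging.debug(json.dumps(...)) call assumes module-level import json and import logging). A minimal sketch of how those guards behave follows; the payload contents and the would_upload helper are hypothetical, not the full chartjson schema.

# Hypothetical chartjson payloads; only 'charts' and 'enabled' are
# read by the guard clauses in _output_chartjson_results_if_present.
enabled_payload = {
    'enabled': True,
    'charts': {'timeToFirstPaint': {'summary': {'type': 'scalar',
                                                'value': 123.4}}},
}
disabled_payload = {'enabled': False, 'charts': {}}
empty_payload = {}

def would_upload(results):
    # Mirrors the early returns in the method above.
    if 'charts' not in results:
        return False  # no chart json present
    if not results.get('enabled', True):
        return False  # benchmark disabled, skip the dashboard
    return True

for name, payload in [('enabled', enabled_payload),
                      ('disabled', disabled_payload),
                      ('empty', empty_payload)]:
    print('%s uploads: %s' % (name, would_upload(payload)))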
1102 def generate_isolated_script(api, chromium_tests_api, mastername, buildername, | 1143 def generate_isolated_script(api, chromium_tests_api, mastername, buildername, |
1103 test_spec, bot_update_step, enable_swarming=False, | 1144 test_spec, bot_update_step, enable_swarming=False, |
1104 swarming_dimensions=None, | 1145 swarming_dimensions=None, |
1105 scripts_compile_targets=None): | 1146 scripts_compile_targets=None): |
1106 for spec in test_spec.get(buildername, {}).get('isolated_scripts', []): | 1147 for spec in test_spec.get(buildername, {}).get('isolated_scripts', []): |
1107 use_swarming = False | 1148 use_swarming = False |
1108 swarming_shards = 1 | 1149 swarming_shards = 1 |
1109 swarming_dimension_sets = None | 1150 swarming_dimension_sets = None |
1110 swarming_priority = None | 1151 swarming_priority = None |
(...skipping 640 matching lines...)
1751 args=args) | 1792 args=args) |
1752 api.gsutil.upload( | 1793 api.gsutil.upload( |
1753 temp_output_dir.join( | 1794 temp_output_dir.join( |
1754 '%s-android-chrome.json' % timestamp_string), | 1795 '%s-android-chrome.json' % timestamp_string), |
1755 'chromium-annotated-tests', 'android') | 1796 'chromium-annotated-tests', 'android') |
1756 | 1797 |
1757 GOMA_TESTS = [ | 1798 GOMA_TESTS = [ |
1758 GTestTest('base_unittests'), | 1799 GTestTest('base_unittests'), |
1759 GTestTest('content_unittests'), | 1800 GTestTest('content_unittests'), |
1760 ] | 1801 ] |