Chromium Code Reviews
| Index: scripts/slave/recipe_modules/chromium_tests/steps.py |
| diff --git a/scripts/slave/recipe_modules/chromium_tests/steps.py b/scripts/slave/recipe_modules/chromium_tests/steps.py |
| index 8139e8375cadfbc6ba15acb9ce248250d6aedbbf..380a5d02f53dc8a0a8788d4e6c5896deccea326e 100644 |
| --- a/scripts/slave/recipe_modules/chromium_tests/steps.py |
| +++ b/scripts/slave/recipe_modules/chromium_tests/steps.py |
| @@ -6,6 +6,10 @@ import datetime |
| import re |
| import string |
| +import json |
| +import logging |
| +from slave import slave_utils |
| +from slave import results_dashboard |
| class Test(object): |
| """ |
| @@ -1088,6 +1090,8 @@ class SwarmingIsolatedScriptTest(SwarmingTest): |
| if not failures and step_result.retcode != 0: |
| failures = ['%s (entire test suite)' % self.name] |
| valid = False |
| + # Check for chartjson results and upload to results dashboard if present. |
| + self._output_chartjson_results_if_present(api, step_result) |
| except (ValueError, KeyError) as e: |
| step_result.presentation.logs['invalid_results_exc'] = [str(e)] |
| valid = False |
| @@ -1098,6 +1102,43 @@ class SwarmingIsolatedScriptTest(SwarmingTest): |
| ]) |
| return valid, failures |
| + def _output_chartjson_results_if_present(self, api, step_result): |
| +    results = ( |
| +        getattr(step_result, 'isolated_script_chartjson_results', None) or {}) |
| + try: |
| +    if 'charts' not in results: |
| + print 'Info: No chart json present' |
| + return |
| + |
| + if not results.get('enabled', True): |
| + print 'Info: Benchmark disabled, not sending results to dashboard' |
| + return |
| + |
| +    main_revision = slave_utils.GetMainRevision( |
| +        api.properties, api.chromium.c.build_dir) |
| +    blink_revision = slave_utils.GetBlinkRevision(api.chromium.c.build_dir) |
|
Ken Russell
2016/09/28 20:34:26
Blink's now folded into Chromium and doesn't have a separate revision any more.
eyaich1
2016/09/29 12:43:34
That makes sense, but like I said in the notes I want to keep this a straight port of the existing buildbot logic for now.
|
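For context on this thread: the upload path below bundles the two revisions into a dict before building the dashboard payload. A minimal sketch of that dict's shape, assuming the buildbot-era key names ('rev' and 'webkit_rev' are assumptions, not taken from this CL):

    # Hypothetical revisions dict; key names assumed from buildbot-era naming.
    revisions = {
        'rev': main_revision,          # Chromium main revision (commit position)
        'webkit_rev': blink_revision,  # Blink revision; redundant since the merge
    }
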
| + revisions = slave_utils.GetTelemetryRevisions( |
|
Ken Russell
2016/09/28 20:34:26
Does this function need to be specialized to Telemetry?
eyaich1
2016/09/29 12:43:34
Done.
|
| + api.properties, main_revision, blink_revision) |
| + reference_build = 'reference' in self.name |
| + stripped_test_name = self.name.replace('.reference', '') |
| + dashboard_json = results_dashboard.MakeDashboardJsonV1( |
| + results, |
| + revisions, stripped_test_name, api.properties['perf-id'], |
| + api.properties['buildername'], api.properties['buildnumber'], |
| + None, reference_build) |
| +    if dashboard_json: |
| +      logging.debug(json.dumps(dashboard_json, indent=2)) |
| +      results_dashboard.SendResults( |
| +          dashboard_json, |
| + api.properties['results-url'], |
| + api.chromium.c.build_dir) |
| + else: |
| + print 'Error: No json output from telemetry.' |
|
Ken Russell
2016/09/28 20:34:26
Perhaps 'Error: No perf dashboard JSON was produced'?
eyaich1
2016/09/29 12:43:34
Done.
|
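For illustration, a heavily simplified sketch of the kind of payload MakeDashboardJsonV1 could produce; the real schema lives in results_dashboard, and none of these field names or values are taken from this CL:

    # Invented example; the actual Dashboard JSON v1 schema is defined in
    # results_dashboard, not here.
    dashboard_json = {
        'master': 'chromium.perf',
        'bot': api.properties['buildername'],
        'point_id': main_revision,
        'test_suite_name': stripped_test_name,
        'is_ref': reference_build,
        'chart_data': results,   # the chartjson payload itself
        'versions': revisions,
    }
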
| + print '@@@STEP_FAILURE@@@' |
| + |
| +    except (ValueError, KeyError) as e: |
| +      print 'Error: Unable to upload chartjson results to perf dashboard: %s' % e |
| + |
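To make the early-return checks in _output_chartjson_results_if_present concrete, here is a standalone sketch of a minimal chartjson payload and the same gating logic; the benchmark name, chart names, and values are invented for illustration:

    # Invented chartjson payload; real data comes from the swarming task output.
    results = {
        'format_version': '1.0',
        'benchmark_name': 'dummy_benchmark',
        'enabled': True,
        'charts': {
            'warm_times': {
                'http://www.example.com/': {
                    'type': 'list_of_scalar_values',
                    'values': [9, 9, 8, 9],
                },
            },
        },
    }

    if 'charts' not in results:
        print 'Info: No chart json present'
    elif not results.get('enabled', True):
        print 'Info: Benchmark disabled, not sending results to dashboard'
    else:
        print 'Info: uploading %d chart(s) to the dashboard' % len(results['charts'])
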
| def generate_isolated_script(api, chromium_tests_api, mastername, buildername, |
| test_spec, bot_update_step, enable_swarming=False, |