Index: scripts/slave/recipe_modules/chromium_tests/steps.py
diff --git a/scripts/slave/recipe_modules/chromium_tests/steps.py b/scripts/slave/recipe_modules/chromium_tests/steps.py
index a67bb356e0e7cb6c155c5e58dc5cf2366a1cf3a1..65c9e9ee20f93c4c22c1a97c4ebe51d30b32e869 100644
--- a/scripts/slave/recipe_modules/chromium_tests/steps.py
+++ b/scripts/slave/recipe_modules/chromium_tests/steps.py
@@ -6,6 +6,8 @@ import datetime |
import re |
import string |
+from slave import slave_utils |
+from slave import results_dashboard |
class Test(object): |
""" |
@@ -1082,6 +1084,34 @@ class SwarmingIsolatedScriptTest(SwarmingTest): |
if not failures and step_result.retcode != 0: |
failures = ['%s (entire test suite)' % self.name] |
valid = False |
+ if 'chartjson' in results and 'charts' in results['chartjson']: |
Ken Russell
2016/09/12 19:35:40
Per comment on dependent CL https://codereview.chr
eyaich1
2016/09/13 16:52:59
Done.
|
+      # This is a swarmed benchmark, so upload its results to the perf
+      # dashboard. It is unclear whether we still need to process the
+      # valid/failures values here, since the output we need is written by
+      # telemetry and the retcode should turn the test red or green.
+      main_revision = GetMainRevision(
+          api.properties, api.chromium.c.build_dir)
+      blink_revision = GetBlinkRevision(api.chromium.c.build_dir)
+ revisions = slave_utils.GetTelemetryRevisions( |
+ api.properties, main_revision, blink_revision) |
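+      # '.reference' results are uploaded under the stripped benchmark name
+      # and flagged as a reference build on the dashboard.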
+ reference_build = 'reference' in self.name |
+ stripped_test_name = self.name.replace('.reference', '') |
+ dashboard_json = results_dashboard.MakeDashboardJsonV1( |
+ results['chartjson'], |
+ revisions, stripped_test_name, api.properties['perf-id'], |
+ api.properties['buildername'], api.properties['buildnumber'], |
+ None, reference_build) |
+      if dashboard_json:
+        logging.debug(json.dumps(dashboard_json, indent=2))
+        results_dashboard.SendResults(
+            dashboard_json,
+ api.properties['results-url'], |
+ api.chromium.c.build_dir) |
+ else: |
+ print 'Error: No json output from telemetry.' |
+ print '@@@STEP_FAILURE@@@' |
+ |
+ |
except (ValueError, KeyError) as e: |
step_result.presentation.logs['invalid_results_exc'] = [str(e)] |
valid = False |
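
For reference, below is a minimal sketch (not part of the CL) of the upload
path this hunk adds, factored into a standalone helper. The helper name and
its parameter list are hypothetical, and it assumes json and logging are
importable here; the results_dashboard and slave_utils calls are used with
the same arguments as in the patch.

import json
import logging

from slave import results_dashboard
from slave import slave_utils


def upload_chartjson_results(results, properties, build_dir,
                             test_name, main_revision, blink_revision):
  """Builds a dashboard JSON payload from chartjson output and uploads it."""
  # Mirrors the patch's guard: only upload when chartjson output with a
  # 'charts' section is present.
  if 'chartjson' not in results or 'charts' not in results['chartjson']:
    return False

  revisions = slave_utils.GetTelemetryRevisions(
      properties, main_revision, blink_revision)

  # '.reference' runs upload under the base benchmark name, flagged as a
  # reference build so they chart alongside the normal run.
  reference_build = 'reference' in test_name
  stripped_test_name = test_name.replace('.reference', '')

  dashboard_json = results_dashboard.MakeDashboardJsonV1(
      results['chartjson'], revisions, stripped_test_name,
      properties['perf-id'], properties['buildername'],
      properties['buildnumber'], None, reference_build)
  if not dashboard_json:
    # Telemetry produced no usable JSON; the caller should fail the step.
    return False

  logging.debug(json.dumps(dashboard_json, indent=2))
  results_dashboard.SendResults(
      dashboard_json, properties['results-url'], build_dir)
  return True

A helper along these lines would also make the chartjson branch easier to
exercise in recipe simulation tests than the inline block above.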