Chromium Code Reviews

Unified Diff: scripts/slave/recipe_modules/bisect_tester/api.py

Issue 2247373002: Refactor stages 1, 2 and test_api overhaul. (Closed)
Base URL: https://chromium.googlesource.com/chromium/tools/build.git@master
Patch Set: Created 4 years, 4 months ago
Index: scripts/slave/recipe_modules/bisect_tester/api.py
diff --git a/scripts/slave/recipe_modules/bisect_tester/api.py b/scripts/slave/recipe_modules/bisect_tester/api.py
index 2766dc8ded69e9df0ee9a7a1512f83870da8658c..22031825ac342dcb6f25a782cd5cbcb5a12421cd 100644
--- a/scripts/slave/recipe_modules/bisect_tester/api.py
+++ b/scripts/slave/recipe_modules/bisect_tester/api.py
@@ -10,6 +10,7 @@ from . import perf_test
 
 BUCKET = 'chrome-perf'
 RESULTS_GS_DIR = 'bisect-results'
+PARSER_PATH = 'tracing/bin/parse_metric'.split('/')
 
 
 class BisectTesterApi(recipe_api.RecipeApi):
@@ -24,7 +25,7 @@ class BisectTesterApi(recipe_api.RecipeApi):
     return self._device_to_test
 
   @device_to_test.setter
-  def device_to_test(self, value):
+  def device_to_test(self, value):
     self._device_to_test = value
 
   def local_test_enabled(self):
@@ -50,54 +51,25 @@ class BisectTesterApi(recipe_api.RecipeApi):
     """Exposes perf tests implementation."""
     return perf_test.run_perf_test(self, test_config, **kwargs)
 
-  def digest_run_results(self, run_results, retcodes, cfg):
-    # TODO(qyearsley): Change this to not use cfg or retcodes and just
-    # return values (or error) regardless of test_type.
-    if not run_results or not retcodes:  # pragma: no cover
-      return {'error': 'No values to aggregate.'}
+  def digest_run_results(self, run_results, cfg):
     if cfg.get('test_type') == 'return_code':
-      return {'values': retcodes}
-    return {'values': run_results['measured_values']}
-
-  def upload_results(self, output, results, retcodes, test_parameters):
-    """Puts the results as a JSON file in a GS bucket."""
-    job_name = (test_parameters.get('job_name') or
-                self.m.properties.get('job_name'))
-    gs_filename = '%s/%s.results' % (RESULTS_GS_DIR, job_name)
-    contents = {'results': results, 'output': output, 'retcodes': retcodes}
-    contents_json = json.dumps(contents)
-    local_save_results = self.m.python('saving json to temp file',
-                                       self.resource('put_temp.py'),
-                                       stdout=self.m.raw_io.output(),
-                                       stdin=self.m.raw_io.input(
-                                           contents_json))
-
-    local_file = local_save_results.stdout.splitlines()[0].strip()
-    # TODO(robertocn): Look into using self.m.json.input(contents) instead of
-    # local_file.
-    self.m.gsutil.upload(local_file, BUCKET, gs_filename)
-
-  def upload_job_url(self):
-    """Puts the URL to the job's status on a GS file."""
-    # If we are running the test locally there is no need for this.
-    if self.local_test_enabled():
-      return  # pragma: no cover
-    gs_filename = RESULTS_GS_DIR + '/' + self.m.properties.get(
-        'job_name')
-    if 'TESTING_MASTER_HOST' in os.environ:  # pragma: no cover
-      url = "http://%s:8041/json/builders/%s/builds/%s" % (
-          os.environ['TESTING_MASTER_HOST'],
-          self.m.properties['buildername'],
-          self.m.properties['buildnumber'])
+      if run_results.get('retcodes'):
+        run_results.update({'values': run_results['retcodes']})
     else:
-      url = "http://build.chromium.org/p/%s/json/builders/%s/builds/%s" % (
-          self.m.properties['mastername'],
-          self.m.properties['buildername'],
-          self.m.properties['buildnumber'])
-    local_save_results = self.m.python('saving url to temp file',
-                                       self.resource('put_temp.py'),
-                                       stdout=self.m.raw_io.output(),
-                                       stdin=self.m.raw_io.input(url))
-    local_file = local_save_results.stdout.splitlines()[0].strip()
-    self.m.gsutil.upload(
-        local_file, BUCKET, gs_filename, name=str(gs_filename))
+      if 'values' in run_results:
+        return run_results
+      args = [run_results['valueset_paths'] or run_results['chartjson_paths'],
+              cfg['metric']]
+      step_results = self.m.python(
+          name='Parsing results',
+          script=self.m.path['catapult'].join(*PARSER_PATH),
+          args=args,
+          stdout=self.m.raw_io.output(),
+          step_test_data=lambda: self.m.json.raw_io.output(
+              "[10, 10, 10, 10]"))
+      raw_values = json.loads(step_results.stdout)
+      if raw_values:
+        run_results.update({'values': raw_values})
+        return run_results
+    run_results.update({'error': 'No values to aggregate.'})
+    return run_results
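
Note on the new digest_run_results flow: the refactor drops the upload_results/upload_job_url helpers from this module and has digest_run_results attach a 'values' list to run_results directly, either from the recorded return codes or by invoking catapult's tracing/bin/parse_metric script on the value-set/chartjson files. Below is a minimal standalone sketch of that flow, not the recipe module itself: the recipe-engine step (self.m.python with the catapult checkout path) is stood in for by a plain subprocess call, and catapult_dir plus the assumption that valueset_paths/chartjson_paths are lists of file paths are illustrative only.

import json
import os
import subprocess

# Path components of catapult's metric parser, mirroring PARSER_PATH above.
PARSER_PATH = 'tracing/bin/parse_metric'.split('/')

def digest_run_results(run_results, cfg, catapult_dir):
  """Attach a 'values' list to run_results, or an 'error' if nothing usable."""
  if cfg.get('test_type') == 'return_code':
    # Return-code bisects use the exit codes themselves as the sample values.
    if run_results.get('retcodes'):
      run_results['values'] = run_results['retcodes']
      return run_results
  else:
    if 'values' in run_results:
      # The test runner already produced values; nothing to parse.
      return run_results
    # Otherwise run catapult's parse_metric script over the value-set (or
    # chartjson) files and read the sampled values from its JSON stdout.
    # 'paths' is assumed to be a list of file path strings.
    paths = run_results['valueset_paths'] or run_results['chartjson_paths']
    parser = os.path.join(catapult_dir, *PARSER_PATH)
    stdout = subprocess.check_output(['python', parser] + paths + [cfg['metric']])
    raw_values = json.loads(stdout)  # e.g. "[10, 10, 10, 10]" -> [10, 10, 10, 10]
    if raw_values:
      run_results['values'] = raw_values
      return run_results
  # Neither branch produced usable values.
  run_results['error'] = 'No values to aggregate.'
  return run_results

In the recipe itself, the step_test_data lambda supplies the canned "[10, 10, 10, 10]" stdout during recipe simulation tests, which appears to be part of the test_api overhaul mentioned in the CL title.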
