OLD | NEW |
1 # Copyright 2015 The Chromium Authors. All rights reserved. | 1 # Copyright 2015 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 import json | 5 import json |
6 import os | 6 import os |
7 | 7 |
8 from recipe_engine import recipe_api | 8 from recipe_engine import recipe_api |
9 from . import perf_test | 9 from . import perf_test |
10 | 10 |
11 BUCKET = 'chrome-perf' | 11 BUCKET = 'chrome-perf' |
12 RESULTS_GS_DIR = 'bisect-results' | 12 RESULTS_GS_DIR = 'bisect-results' |
| 13 PARSER_PATH = 'tracing/bin/parse_metric'.split('/') |
13 | 14 |
14 | 15 |
15 class BisectTesterApi(recipe_api.RecipeApi): | 16 class BisectTesterApi(recipe_api.RecipeApi): |
16 """A module for the bisect tester bot using the chromium recipe.""" | 17 """A module for the bisect tester bot using the chromium recipe.""" |
17 | 18 |
18 def __init__(self, **kwargs): | 19 def __init__(self, **kwargs): |
19 super(BisectTesterApi, self).__init__(**kwargs) | 20 super(BisectTesterApi, self).__init__(**kwargs) |
20 self._device_to_test = None | 21 self._device_to_test = None |
21 | 22 |
22 @property | 23 @property |
23 def device_to_test(self): | 24 def device_to_test(self): |
24 return self._device_to_test | 25 return self._device_to_test |
25 | 26 |
26 @device_to_test.setter | 27 @device_to_test.setter |
27 def device_to_test(self, value): | 28 def device_to_test(self, value): |
28 self._device_to_test = value | 29 self._device_to_test = value |
29 | 30 |
30 def local_test_enabled(self): | 31 def local_test_enabled(self): |
31 buildername = os.environ.get('BUILDBOT_BUILDERNAME') | 32 buildername = os.environ.get('BUILDBOT_BUILDERNAME') |
32 cr_config = self.m.chromium.c | 33 cr_config = self.m.chromium.c |
33 if buildername and buildername.endswith('_bisect') and cr_config or ( | 34 if buildername and buildername.endswith('_bisect') and cr_config or ( |
34 self.m.properties.get('local_test')): | 35 self.m.properties.get('local_test')): |
35 return True # pragma: no cover | 36 return True # pragma: no cover |
36 return False | 37 return False |
37 | 38 |
38 def load_config_from_dict(self, bisect_config): | 39 def load_config_from_dict(self, bisect_config): |
39 """Copies the required configuration keys to a new dict.""" | 40 """Copies the required configuration keys to a new dict.""" |
40 return { | 41 return { |
41 'command': bisect_config['command'], | 42 'command': bisect_config['command'], |
42 'metric': bisect_config.get('metric'), | 43 'metric': bisect_config.get('metric'), |
43 'repeat_count': int(bisect_config.get('repeat_count', 20)), | 44 'repeat_count': int(bisect_config.get('repeat_count', 20)), |
45 # The default is NOT to time out, hence 0. | 46 # The default is NOT to time out, hence 0. |
45 'max_time_minutes': float(bisect_config.get('max_time_minutes', 0)), | 46 'max_time_minutes': float(bisect_config.get('max_time_minutes', 0)), |
46 'test_type': bisect_config.get('test_type', 'perf') | 47 'test_type': bisect_config.get('test_type', 'perf') |
47 } | 48 } |
48 | 49 |
49 def run_test(self, test_config, **kwargs): | 50 def run_test(self, test_config, **kwargs): |
50 """Exposes perf tests implementation.""" | 51 """Exposes perf tests implementation.""" |
51 return perf_test.run_perf_test(self, test_config, **kwargs) | 52 return perf_test.run_perf_test(self, test_config, **kwargs) |
52 | 53 |
53 def digest_run_results(self, run_results, retcodes, cfg): | 54   def digest_run_results(self, run_results, cfg): |
 | 55     """Aggregates the values measured by a test, or records an error.""" |
54 # TODO(qyearsley): Change this to not use cfg or retcodes and just | |
55 # return values (or error) regardless of test_type. | |
56 if not run_results or not retcodes: # pragma: no cover | |
57 return {'error': 'No values to aggregate.'} | |
58 if cfg.get('test_type') == 'return_code': | 56     if cfg.get('test_type') == 'return_code': |
 | 57       # For return_code tests, the return codes are the measured values. |
59 return {'values': retcodes} | 58       if run_results.get('retcodes'): |
60 return {'values': run_results['measured_values']} | 59         run_results.update({'values': run_results['retcodes']}) |
 | 60       else: |
 | 61         run_results.update({'error': 'No values to aggregate.'}) |
 | 62       return run_results |
61 | |
62 def upload_results(self, output, results, retcodes, test_parameters): | |
63 """Puts the results as a JSON file in a GS bucket.""" | |
64 job_name = (test_parameters.get('job_name') or | |
65 self.m.properties.get('job_name')) | |
66 gs_filename = '%s/%s.results' % (RESULTS_GS_DIR, job_name) | |
67 contents = {'results': results, 'output': output, 'retcodes': retcodes} | |
68 contents_json = json.dumps(contents) | |
69 local_save_results = self.m.python('saving json to temp file', | |
70 self.resource('put_temp.py'), | |
71 stdout=self.m.raw_io.output(), | |
72 stdin=self.m.raw_io.input( | |
73 contents_json)) | |
74 | |
75 local_file = local_save_results.stdout.splitlines()[0].strip() | |
76 # TODO(robertocn): Look into using self.m.json.input(contents) instead of | |
77 # local_file. | |
78 self.m.gsutil.upload(local_file, BUCKET, gs_filename) | |
79 | |
80 def upload_job_url(self): | |
81 """Puts the URL to the job's status on a GS file.""" | |
82 # If we are running the test locally there is no need for this. | |
83 if self.local_test_enabled(): | |
84 return # pragma: no cover | |
85 gs_filename = RESULTS_GS_DIR + '/' + self.m.properties.get( | |
86 'job_name') | |
87 if 'TESTING_MASTER_HOST' in os.environ: # pragma: no cover | |
88 url = "http://%s:8041/json/builders/%s/builds/%s" % ( | |
89 os.environ['TESTING_MASTER_HOST'], | |
90 self.m.properties['buildername'], | |
91 self.m.properties['buildnumber']) | |
92 else: | 63     if 'values' in run_results: |
93 url = "http://build.chromium.org/p/%s/json/builders/%s/builds/%s" % ( | 64       return run_results |
94 self.m.properties['mastername'], | 65     # Let catapult's parse_metric script extract the metric's values; it |
95 self.m.properties['buildername'], | 66     # prints them to stdout as a JSON list. |
96 self.m.properties['buildnumber']) | 67     paths = (run_results.get('valueset_paths') or |
97 local_save_results = self.m.python('saving url to temp file', | 68              run_results.get('chartjson_paths')) |
98 self.resource('put_temp.py'), | 69     step_results = self.m.python( |
99 stdout=self.m.raw_io.output(), | 70         name='Parsing results', |
100 stdin=self.m.raw_io.input(url)) | 71         script=self.m.path['catapult'].join(*PARSER_PATH), |
101 local_file = local_save_results.stdout.splitlines()[0].strip() | 72         args=[paths, cfg['metric']], |
102 self.m.gsutil.upload( | 73         stdout=self.m.raw_io.output(), |
103 local_file, BUCKET, gs_filename, name=str(gs_filename)) | 74         step_test_data=lambda: self.m.raw_io.test_api.stream_output( |
 | 75             '[10, 10, 10, 10]')) |
 | 76     raw_values = json.loads(step_results.stdout) |
 | 77     if raw_values: |
 | 78       run_results.update({'values': raw_values}) |
 | 79       return run_results |
 | 80     run_results.update({'error': 'No values to aggregate.'}) |
 | 81     return run_results |
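A minimal end-to-end sketch of how a caller recipe might drive this module, assuming `api.bisect_tester` maps to it and that `run_test` returns the `run_results` dict consumed by `digest_run_results`; the DEPS list, command, metric, and sample values are invented for illustration, and only the config keys come from `load_config_from_dict` above.

    DEPS = ['bisect_tester', 'step']  # hypothetical caller recipe

    def RunSteps(api):
      cfg = api.bisect_tester.load_config_from_dict({
          'command': './tools/perf/run_benchmark sunspider',  # invented
          'metric': 'Total/Total',                            # invented
          'repeat_count': '5',      # coerced to int by the loader
          'max_time_minutes': '0',  # 0 means "never time out"
          'test_type': 'perf',
      })
      run_results = api.bisect_tester.run_test(cfg)
      # 'perf' runs are parsed via catapult's parse_metric script;
      # 'return_code' runs reuse run_results['retcodes']. Either way the
      # dict comes back with a 'values' or an 'error' entry.
      digested = api.bisect_tester.digest_run_results(run_results, cfg)
      if 'error' in digested:
        raise api.step.StepFailure(digested['error'])
      return digested['values']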
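One note on the unchanged `local_test_enabled`: Python's `and` binds tighter than `or`, so its multi-line condition groups as `(buildername and buildername.endswith('_bisect') and cr_config) or local_test`. A tiny self-contained check of that grouping, with invented sample values:

    # `and` binds tighter than `or`, so both forms below are equivalent.
    buildername = None  # BUILDBOT_BUILDERNAME unset
    cr_config = None    # no chromium config applied yet
    local_test = True   # the 'local_test' property is set

    implicit = (buildername and buildername.endswith('_bisect')
                and cr_config or local_test)
    explicit = ((buildername and buildername.endswith('_bisect')
                 and cr_config) or local_test)
    assert implicit is True and explicit is True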