Index: scripts/slave/recipe_modules/auto_bisect/api.py
diff --git a/scripts/slave/recipe_modules/auto_bisect/api.py b/scripts/slave/recipe_modules/auto_bisect/api.py
index e37274f929e97baf702ac0fb9626026f85ab010e..5c227022b809d2ec4e50092d755c2347d11d8e37 100644
--- a/scripts/slave/recipe_modules/auto_bisect/api.py
+++ b/scripts/slave/recipe_modules/auto_bisect/api.py
@@ -134,7 +134,9 @@ class AutoBisectApi(recipe_api.RecipeApi):
         step_name,
         self.resource('fetch_revision_info.py'),
         [revision.commit_hash, revision.depot_name],
-        stdout=self.m.json.output())
+        stdout=self.m.json.output(),
+        step_test_data=lambda: self._test_data['cl_info'][revision.commit_hash],
+    )
     return result.stdout
 
   def _commit_info(self, commit_hash, url, step_name=None):  # pragma: no cover
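The new step_test_data argument supplies canned JSON when the recipe runs
under simulation. A minimal sketch of how self._test_data['cl_info'] could be
populated from the module's test_api, using the recipe engine's standard
mod_test_data mechanism (the file contents below are an assumption, not part
of this diff):

    # Hypothetical scripts/slave/recipe_modules/auto_bisect/test_api.py
    from recipe_engine import recipe_test_api

    class AutoBisectTestApi(recipe_test_api.RecipeTestApi):
      @recipe_test_api.mod_test_data
      @staticmethod
      def cl_info(hash_to_step_data):
        # Maps commit hash -> step test data; read back above via
        # self._test_data['cl_info'][revision.commit_hash].
        return hash_to_step_data

A GenTests block would then feed it something like
api.auto_bisect.cl_info({'deadbeef': api.json.output({...})}).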
@@ -203,7 +205,7 @@ class AutoBisectApi(recipe_api.RecipeApi):
         xvfb=True, **kwargs)
 
   def run_local_test_run(self, test_config_params,
-                         skip_download=False):  # pragma: no cover
+                         skip_download=False, **kwargs):  # pragma: no cover
     """Starts a test run on the same machine.
 
     This is for the merged director/tester flow.
@@ -215,9 +217,9 @@ class AutoBisectApi(recipe_api.RecipeApi):
       update_step = None
     else:
       update_step = self._SyncRevisionToTest(test_config_params)
-    self.start_test_run_for_bisect(update_step, self.bot_db,
-                                   test_config_params, run_locally=True,
-                                   skip_download=skip_download)
+    return self.start_test_run_for_bisect(
+        update_step, self.bot_db, test_config_params, run_locally=True,
+        skip_download=skip_download, **kwargs)
 
   def ensure_checkout(self, *args, **kwargs):
     if self.working_dir:
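Together with the previous hunk, extra keyword arguments now flow from
run_local_test_run into start_test_run_for_bisect, and the test's results
propagate back to the caller instead of being dropped. A hedged caller
sketch (the extra kwarg and the shape of the return value are assumptions):

    # Hypothetical call site in a recipe.
    run_results = api.auto_bisect.run_local_test_run(
        test_config_params,
        skip_download=True,  # reuse a build fetched by an earlier run
        test_type='perf')    # assumed kwarg, forwarded down to BisectTest
    # run_results is BisectTest.run_results; its shape is defined in
    # chromium_tests.steps, not in this diff.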
@@ -247,13 +249,11 @@ class AutoBisectApi(recipe_api.RecipeApi):
 
   def start_test_run_for_bisect(self, update_step, bot_db,
                                 test_config_params, run_locally=False,
-                                skip_download=False):
+                                skip_download=False, **kwargs):
     mastername = self.m.properties.get('mastername')
     buildername = self.m.properties.get('buildername')
     bot_config = bot_db.get_bot_config(mastername, buildername)
     build_archive_url = test_config_params['parent_build_archive_url']
-    if not run_locally:
-      self.m.bisect_tester.upload_job_url()
     if not skip_download:
       if self.m.chromium.c.TARGET_PLATFORM == 'android':
         # The best way to ensure the old build directory is not used is to
@@ -296,7 +296,9 @@ class AutoBisectApi(recipe_api.RecipeApi):
         build_revision=test_config_params['parent_got_revision'],
         override_bot_type='tester')
 
-    tests = [self.m.chromium_tests.steps.BisectTest(test_config_params)]
+    tests = [
+        self.m.chromium_tests.steps.BisectTest(
+            test_config_params, **kwargs)]
 
     if not tests:  # pragma: no cover
       return
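The forwarded **kwargs terminate in chromium_tests.steps.BisectTest. A rough
sketch of the receiving side; beyond accepting **kwargs and exposing
run_results (both visible in this diff), the class internals are assumptions:

    # Sketch of the consumer in chromium_tests/steps.py.
    class BisectTest(Test):  # base class assumed
      def __init__(self, test_parameters, **kwargs):
        super(BisectTest, self).__init__()
        self._test_parameters = test_parameters
        self._options = kwargs  # options threaded from run_local_test_run
        self.run_results = {}   # filled by run(); returned in the next hunk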
@@ -326,6 +328,7 @@ class AutoBisectApi(recipe_api.RecipeApi):
       self.deploy_apk_on_device(
           self.full_deploy_script, deploy_apks, deploy_args)
     test_runner()
+    return tests[0].run_results
 
   def deploy_apk_on_device(self, deploy_script, deploy_apks, deploy_args):
     """Installs apk on the android device."""
@@ -375,8 +378,7 @@ class AutoBisectApi(recipe_api.RecipeApi):
       flags['do_not_nest_wait_for_revision'] = kwargs.pop(
           'do_not_nest_wait_for_revision')
     if bot_db is None:  # pragma: no cover
-      self.bot_db = api.chromium_tests.create_bot_db_from_master_dict(
-          '', None, None)
+      self.bot_db = api.chromium_tests.create_bot_db_from_master_dict('', None)
     else:
       self.bot_db = bot_db
 
@@ -421,3 +423,32 @@ class AutoBisectApi(recipe_api.RecipeApi):
     else:
       self.ensure_checkout()
     api.chromium_android.common_tests_final_steps()
+
+  def stat_compare(self, values_a, values_b, metric,
+                   output_format='chartjson', **kwargs):
+    """Compares samples using catapult's statistics implementation.
+
+    Args:
+      values_a, values_b: Lists of paths to the JSON files containing the
+        values produced by the test.
+      metric: The name of the metric as sent by the dashboard.
+      output_format: Either 'chartjson' or 'valueset'.
+
+    Returns:
+      A dict containing 'result', which may be True, False or
+      'needMoreData', as well as details about each sample
+      ('debug_values', 'mean' and 'std_dev').
+    """
+    args = [','.join(map(str, values_a)),
+            ','.join(map(str, values_b)),
+            metric,
+            '--' + output_format]
+
+    script = self.m.path['catapult'].join(
+        'tracing', 'bin', 'compare_samples')
+    return self.m.python(
+        'Compare samples',
+        script=script,
+        args=args,
+        stdout=self.m.json.output(),
+        **kwargs).stdout
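A hedged usage example for the new method; the file names and metric are
invented, while the result keys come from the docstring above:

    # Hypothetical call site in a recipe.
    comparison = api.auto_bisect.stat_compare(
        ['perf_a_0.json', 'perf_a_1.json'],  # sample A value files
        ['perf_b_0.json', 'perf_b_1.json'],  # sample B value files
        'timeToFirstPaint',                  # metric name from the dashboard
        output_format='chartjson')
    if comparison['result'] == 'needMoreData':
      pass  # collect more repetitions before judging
    elif comparison['result']:
      pass  # statistically significant difference between the samples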