| Index: scripts/slave/recipe_modules/swarming/api.py
|
| diff --git a/scripts/slave/recipe_modules/swarming/api.py b/scripts/slave/recipe_modules/swarming/api.py
|
| index 07b8ff919dac6a9254a5321c533f6810fadf2538..937f73cedac50b4608d768bd1f84dae8be1a78b9 100644
|
| --- a/scripts/slave/recipe_modules/swarming/api.py
|
| +++ b/scripts/slave/recipe_modules/swarming/api.py
|
| @@ -398,6 +398,37 @@ class SwarmingApi(recipe_api.RecipeApi):
|
| self._gtest_collect_step(test_launcher_summary_output, *args, **kw))
|
| return task
|
|
|
| + def telemetry_gpu_task(self, title, isolated_hash, extra_args=None, **kwargs):
|
| + """Returns a new SwarmingTask to run an isolated telemetry test on Swarming.
|
| +
|
| + The Swarming recipe module knows how to collect the JSON file with the test

|

| + execution summary produced by the telemetry test launcher. Since telemetry

|

| + tests do not support sharding, no merging of results is performed. The parsed

|

| + JSON summary is returned from the collect step.
|
| +
|
| + For the meaning of the rest of the arguments, see the 'task' method.
|
| + """
|
| + extra_args = list(extra_args or [])
|
| +
|
| + # Ensure --output-dir is not already passed. We are going to overwrite it.
|
| + bad_args = any(x.startswith('--output-dir') for x in extra_args)
|
| + if bad_args: # pragma: no cover
|
| + raise ValueError('--output-dir should not be used.')
|
| +
|
| + extra_args.extend(['--output-format', 'json',
|
| + '--output-dir', '${ISOLATED_OUTDIR}'])
|
| +
|
| + # For the time being, assume that all Telemetry tests are not
|
| + # idempotent. As of this writing, Telemetry itself downloads some
|
| + # data from remote servers; additionally, some tests like the
|
| + # pixel_tests download reference images from cloud storage. It's
|
| + # safest to assume that these tests aren't idempotent, though we
|
| + # should work toward making them so.
|
| + task = self.task(title, isolated_hash, extra_args=extra_args,
|
| + idempotent=False, **kwargs)
|
| + task.collect_step = self._telemetry_gpu_collect_step
|
| + return task
|
| +
|
| def isolated_script_task(self, title, isolated_hash, extra_args=None,
|
| idempotent=False, **kwargs):
|
| """Returns a new SwarmingTask to run an isolated script test on Swarming.
|
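The new telemetry_gpu_task method only builds the task object; triggering and
collecting still go through the module's existing helpers. A rough usage
sketch from a caller's point of view, assuming the usual trigger_task /
collect_task flow (the step title, isolate hash, extra argument and GPU
dimension below are made-up placeholders, not taken from this patch):

  # Hypothetical recipe code; hash and dimensions are placeholders.
  task = api.swarming.telemetry_gpu_task(
      'pixel_test',                                # step title
      'ffffffffffffffffffffffffffffffffffffffff',  # isolated hash
      extra_args=['--browser=release'])
  task.dimensions['gpu'] = '10de'    # ask for a bot with an NVIDIA GPU
  api.swarming.trigger_task(task)    # swarming.py trigger
  # ... unrelated steps may run here while the bot executes the test ...
  api.swarming.collect_task(task)    # runs _telemetry_gpu_collect_step
  results = api.step.active_result.telemetry_results

Because telemetry_gpu_task forces idempotent=False, Swarming will not reuse a
cached result for an identical task, matching the non-hermetic behaviour
described in the comment above.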
| @@ -680,6 +711,61 @@ class SwarmingApi(recipe_api.RecipeApi):
|
| self._display_pending(gtest_results.raw.get('swarming_summary', {}),
|
| step_result.presentation)
|
|
|
| + def _telemetry_gpu_collect_step(self, task, **kwargs):
|
| + step_test_data = kwargs.pop('step_test_data', None)
|
| + if not step_test_data:
|
| + step_test_data = self.m.test_utils.test_api.canned_telemetry_gpu_output(
|
| + passing=True, is_win=self.m.platform.is_win, swarming=True)
|
| +
|
| + args = self.get_collect_cmd_args(task)
|
| + args.extend(['--task-output-dir', self.m.raw_io.output_dir()])
|
| +
|
| + try:
|
| + self.m.python(
|
| + name=self._get_step_name('', task),
|
| + script=self.m.swarming_client.path.join('swarming.py'),
|
| + args=args, step_test_data=lambda: step_test_data,
|
| + **kwargs)
|
| + finally:
|
| + # Regardless of the outcome of the test (pass or fail), we try to parse

|

| + # the results. If any error occurs while parsing the results, we set them

|

| + # to None, which is treated as invalid test results by the

|

| + # SwarmingTelemetryGPUTest class in recipe_modules/chromium/steps.py. Note

|

| + # that the try-except block below does not mask the recipe_api.StepFailure

|

| + # exception from the collect step above; instead, it is allowed to

|

| + # propagate after the results have been parsed.
|
| + try:
|
| + step_result = self.m.step.active_result
|
| + outdir_json = self.m.json.dumps(step_result.raw_io.output_dir, indent=2)
|
| + step_result.presentation.logs['outdir_json'] = outdir_json.splitlines()
|
| +
|
| + # Check whether any shard hit an internal Swarming failure.
|
| + summary = self.m.json.loads(
|
| + step_result.raw_io.output_dir['summary.json'])
|
| + if any(shard['internal_failure'] for shard in summary['shards']):
|
| + raise recipe_api.InfraFailure('Internal swarming failure.')
|
| +
|
| + # TODO(sergiyb): Combine telemetry results from multiple shards rather
|
| + # than assuming that there is always just one shard.
|
| + assert len(summary['shards']) == 1
|
| + results_raw = step_result.raw_io.output_dir[
|
| + self.m.path.join('0', 'results.json')]
|
| +
|
| + # The GPU test launcher may bail out early with return code 0 and an empty

|

| + # results file if there were no tests to run, e.g. when all tests are

|

| + # disabled on the current platform.

|

| + # TODO(sergiyb): We should instead rewrite run_gpu_test.py to always

|

| + # write a valid results.json regardless of the return code.
|
| + if step_result.retcode == 0 and results_raw == '':
|
| + step_result.telemetry_results = {'per_page_values': [], 'pages': []}
|
| + else:
|
| + step_result.telemetry_results = self.m.json.loads(results_raw)
|
| +
|
| + self._display_pending(summary, step_result.presentation)
|
| + except Exception as e:
|
| + self.m.step.active_result.presentation.logs['no_results_exc'] = [str(e)]
|
| + self.m.step.active_result.telemetry_results = None
|
| +
|
| def _isolated_script_collect_step(self, task, **kwargs):
|
| step_test_data = kwargs.pop('step_test_data', None)
|
| if not step_test_data:
|
|
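For reference, here is a standalone sketch of the output layout that the
collect step above consumes and of the same parsing rules, written outside
the recipe framework. The file names mirror the code above; the helper itself
is purely illustrative and not part of the patch:

  import json
  import os

  def parse_telemetry_gpu_output(task_output_dir):
    # 'swarming.py collect --task-output-dir' writes summary.json plus one
    # numbered subdirectory per shard with that shard's isolated outputs.
    with open(os.path.join(task_output_dir, 'summary.json')) as f:
      summary = json.load(f)
    if any(shard['internal_failure'] for shard in summary['shards']):
      raise RuntimeError('Internal swarming failure.')
    # Telemetry tests are not sharded yet, so exactly one shard is expected.
    assert len(summary['shards']) == 1
    with open(os.path.join(task_output_dir, '0', 'results.json')) as f:
      results_raw = f.read()
    if results_raw == '':
      # The launcher may exit with code 0 and an empty results file when all
      # tests are disabled on the current platform.
      return {'per_page_values': [], 'pages': []}
    return json.loads(results_raw)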
|