| Index: scripts/slave/recipe_modules/chromium/steps.py
 | 
| diff --git a/scripts/slave/recipe_modules/chromium/steps.py b/scripts/slave/recipe_modules/chromium/steps.py
 | 
| index e0bed85778d58cd2a8331aeaee62efcd30f1a3a0..511892173328f01194a632569c903e7f8402f1ae 100644
 | 
| --- a/scripts/slave/recipe_modules/chromium/steps.py
 | 
| +++ b/scripts/slave/recipe_modules/chromium/steps.py
 | 
| @@ -377,76 +377,55 @@
 | 
|      return self._results[suffix].failures
 | 
|  
 | 
|  
 | 
| -class PythonBasedTest(Test):
 | 
| -  @staticmethod
 | 
| -  def compile_targets(_):
 | 
| -    return []
 | 
| -
 | 
| -  def run_step(self, api, suffix, cmd_args, **kwargs):
 | 
| -    raise NotImplementedError()
 | 
| -
 | 
| -  def run(self, api, suffix):
 | 
| -    args = ['--write-full-results-to',
 | 
| -            api.json.test_results(add_json_log=False)]
 | 
| -    if suffix == 'without patch':
 | 
| -      args.extend(self.failures(api, 'with patch'))
 | 
| -
 | 
| -    def followup_fn(step_result):
 | 
| -      r = step_result.json.test_results
 | 
| -      p = step_result.presentation
 | 
| -
 | 
| -      p.step_text += api.test_utils.format_step_text([
 | 
| -        ['unexpected_failures:', r.unexpected_failures.keys()],
 | 
| -      ])
 | 
| -
 | 
| -    return self.run_step(
 | 
| -        api, suffix, args, can_fail_build=(not suffix),
 | 
| -        step_test_data=lambda: api.json.test_api.canned_test_output(
 | 
| -            True), followup_fn=followup_fn)
 | 
| -
 | 
| -  def has_valid_results(self, api, suffix):
 | 
| -    # TODO(dpranke): we should just return zero/nonzero for success/fail.
 | 
| -    # crbug.com/357866
 | 
| -    step = api.step_history[self._step_name(suffix)]
 | 
| -    return (step.json.test_results.valid and
 | 
| -            step.retcode <= step.json.test_results.MAX_FAILURES_EXIT_STATUS)
 | 
| -
 | 
| -  def failures(self, api, suffix):
 | 
| -    sn = self._step_name(suffix)
 | 
| -    return api.step_history[sn].json.test_results.unexpected_failures
 | 
| -
 | 
| -
 | 
| -class MojoPythonTests(PythonBasedTest):  # pylint: disable=W0232
 | 
| -  name = 'mojo_python_tests'
 | 
| -
 | 
| -  def run_step(self, api, suffix, cmd_args, **kwargs):
 | 
| -    return api.python(self._step_name(suffix),
 | 
| -                      api.path['checkout'].join('mojo', 'tools',
 | 
| -                                                'run_mojo_python_tests.py'),
 | 
| -                      cmd_args, **kwargs)
 | 
| -
 | 
| -
 | 
| -class TelemetryUnitTests(PythonBasedTest):  # pylint: disable=W0232
 | 
| +class TelemetryUnitTests(Test):
 | 
|    name = 'telemetry_unittests'
 | 
|  
 | 
| -  @staticmethod
 | 
| -  def compile_targets(_):
 | 
| -      return ['chrome']
 | 
| -
 | 
| -  def run_step(self, api, suffix, cmd_args, **kwargs):
 | 
| -    return api.chromium.run_telemetry_unittests(suffix, cmd_args, **kwargs)
 | 
| -
 | 
| -
 | 
| -class TelemetryPerfUnitTests(PythonBasedTest):
 | 
| +  def run(self, api, suffix):
 | 
| +    # Until telemetry tests output JSON, need to fail on failure with patch.
 | 
| +    # Otherwise, if the tests were failing on trunk and a CL introduces a
 | 
| +    # new regression, the CL would land since the failure text is hardcoded
 | 
| +    # below. http://crbug.com/359521.
 | 
| +    return api.chromium.run_telemetry_unittests(
 | 
| +        suffix, always_run=True, can_fail_build=True)
 | 
| +
 | 
| +  @staticmethod
 | 
| +  def compile_targets(_):
 | 
| +    return ['chrome']
 | 
| +
 | 
| +  def has_valid_results(self, api, suffix):
 | 
| +    return True
 | 
| +
 | 
| +  def failures(self, api, suffix):
 | 
| +    # TODO(phajdan.jr): Make it possible to retry individual failing telemetry
 | 
| +    # tests (add JSON).
 | 
| +    if api.step_history[self._step_name(suffix)].retcode:
 | 
| +      return ['telemetry_unittests']
 | 
| +    return []
 | 
| +
 | 
| +class TelemetryPerfUnitTests(Test):
 | 
|    name = 'telemetry_perf_unittests'
 | 
|  
 | 
| +  def run(self, api, suffix):
 | 
| +    # Until telemetry tests output JSON, need to fail on failure with patch.
 | 
| +    # Otherwise, if the tests were failing on trunk and a CL introduces a
 | 
| +    # new regression, the CL would land since the failure text is hardcoded
 | 
| +    # below. http://crbug.com/359521.
 | 
| +    return api.chromium.run_telemetry_perf_unittests(
 | 
| +        suffix, always_run=True, can_fail_build=True)
 | 
| +
 | 
|    @staticmethod
 | 
|    def compile_targets(_):
 | 
|      return ['chrome']
 | 
|  
 | 
| -  def run_step(self, api, suffix, cmd_args, **kwargs):
 | 
| -    return api.chromium.run_telemetry_perf_unittests(suffix, cmd_args,
 | 
| -                                                     **kwargs)
 | 
| +  def has_valid_results(self, api, suffix):
 | 
| +    return True
 | 
| +
 | 
| +  def failures(self, api, suffix):
 | 
| +    # TODO(phajdan.jr): Make it possible to retry individual failing telemetry
 | 
| +    # tests (add JSON).
 | 
| +    if api.step_history[self._step_name(suffix)].retcode:
 | 
| +      return ['telemetry_perf_unittests']
 | 
| +    return []
 | 
|  
 | 
|  
 | 
|  class NaclIntegrationTest(Test):  # pylint: disable=W0232
 | 
| @@ -508,10 +487,52 @@
 | 
|      return [self.compile_target]
 | 
|  
 | 
|  
 | 
| +class MojoPythonTests(Test):  # pylint: disable=W0232
 | 
| +  name = 'mojo_python_tests'
 | 
| +
 | 
| +  @staticmethod
 | 
| +  def compile_targets(_):
 | 
| +    return []
 | 
| +
 | 
| +  def run(self, api, suffix):
 | 
| +    args = ['--write-full-results-to',
 | 
| +            api.json.test_results(add_json_log=False)]
 | 
| +    if suffix == 'without patch':
 | 
| +      args.extend(self.failures(api, 'with patch'))
 | 
| +
 | 
| +    def followup_fn(step_result):
 | 
| +      r = step_result.json.test_results
 | 
| +      p = step_result.presentation
 | 
| +
 | 
| +      p.step_text += api.test_utils.format_step_text([
 | 
| +        ['unexpected_failures:', r.unexpected_failures.keys()],
 | 
| +      ])
 | 
| +
 | 
| +    return api.python(
 | 
| +        self._step_name(suffix),
 | 
| +        api.path['checkout'].join(
 | 
| +            'mojo',
 | 
| +            'tools',
 | 
| +            'run_mojo_python_tests.py'),
 | 
| +        args,
 | 
| +        can_fail_build=(not suffix),
 | 
| +        always_run=True,
 | 
| +        step_test_data=lambda: api.json.test_api.canned_test_output(
 | 
| +            True), followup_fn=followup_fn)
 | 
| +
 | 
| +  def has_valid_results(self, api, suffix):
 | 
| +    # TODO(dpranke): we should just return zero/nonzero for success/fail.
 | 
| +    # crbug.com/357866
 | 
| +    step = api.step_history[self._step_name(suffix)]
 | 
| +    return (step.json.test_results.valid and
 | 
| +            step.retcode <= step.json.test_results.MAX_FAILURES_EXIT_STATUS)
 | 
| +
 | 
| +  def failures(self, api, suffix):
 | 
| +    sn = self._step_name(suffix)
 | 
| +    return api.step_history[sn].json.test_results.unexpected_failures
 | 
| +
 | 
| +
 | 
|  class BlinkTest(Test):
 | 
| -  # TODO(dpranke): This should be converted to a PythonBasedTest, although it
 | 
| -  # will need custom behavior because we archive the results as well.
 | 
| -
 | 
|    name = 'webkit_tests'
 | 
|  
 | 
|    def __init__(self, api):
 | 
| 
 |