Index: telemetry/telemetry/internal/story_runner_unittest.py
diff --git a/telemetry/telemetry/internal/story_runner_unittest.py b/telemetry/telemetry/internal/story_runner_unittest.py
index 950c18460ea89714da92c6686a4fed5ea2a77385..94e232128cc2e7670cd471a779c18c03500ab4f9 100644
--- a/telemetry/telemetry/internal/story_runner_unittest.py
+++ b/telemetry/telemetry/internal/story_runner_unittest.py
@@ -153,7 +153,22 @@ def SetupStorySet(allow_multiple_story_states, story_state_list):
         name='story%d' % i))
   return story_set
 
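+# Two minimal StoryExpectations fixtures: the first permanently disables an
+# entire benchmark, the second disables only the story named 'one'. Both cite
+# a placeholder bug reference.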
+class _DisableBenchmarkExpectations(
+    story_module.expectations.StoryExpectations):
+  def SetExpectations(self):
+    self.PermanentlyDisableBenchmark(
+        [story_module.expectations.ALL], 'crbug.com/123')
+
+class _DisableStoryExpectations(story_module.expectations.StoryExpectations):
+  def SetExpectations(self):
+    self.DisableStory('one', [story_module.expectations.ALL], 'crbug.com/123')
+
+
 class FakeBenchmark(benchmark.Benchmark):
+  def __init__(self):
+    super(FakeBenchmark, self).__init__()
+    self._disabled = False
+
   @classmethod
   def Name(cls):
     return 'fake'
@@ -163,6 +178,20 @@ class FakeBenchmark(benchmark.Benchmark):
   def page_set(self):
     return story_module.StorySet()
 
+  @property
+  def Disabled(self):
+    return self._disabled
+
+  @Disabled.setter
+  def Disabled(self, b):
+    assert isinstance(b, bool)
+    self._disabled = b
+
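+  # Flipping Disabled makes GetExpectations return the benchmark-disabling
+  # expectations defined above, which the RunBenchmark tests below exercise.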
+  def GetExpectations(self):
+    if self.Disabled:
+      return _DisableBenchmarkExpectations()
+    return story_module.expectations.StoryExpectations()
+
 
 def _GetOptionForUnittest():
   options = options_for_unittests.GetCopy()
@@ -186,6 +215,10 @@ def GetNumberOfSuccessfulPageRuns(results):
   return len([run for run in results.all_page_runs if run.ok or run.skipped])
 
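+# Unlike GetNumberOfSuccessfulPageRuns above, which counts runs that are ok
+# *or* skipped, this helper counts only the skipped runs.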
+def GetNumberOfSkippedPageRuns(results):
+  return len([run for run in results.all_page_runs if run.skipped])
+
+
 class TestOnlyException(Exception):
   pass
@@ -204,6 +237,18 @@ class SkipValueMatcher(object):
     return isinstance(other, skip.SkipValue)
 
 
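+# Hoisted to module level from the test that previously defined it inline
+# (see the removal further down) so the new disabled-story tests can reuse
+# it: emits an increasing 'metric' scalar for each page run.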
+class _Measurement(legacy_page_test.LegacyPageTest):
+  i = 0
+  def RunPage(self, page, _, results):
+    self.i += 1
+    results.AddValue(scalar.ScalarValue(
+        page, 'metric', 'unit', self.i,
+        improvement_direction=improvement_direction.UP))
+
+  def ValidateAndMeasurePage(self, page, tab, results):
+    pass
+
+
 class StoryRunnerTest(unittest.TestCase):
   def setUp(self):
     self.fake_stdout = StringIO.StringIO()
@@ -619,23 +664,11 @@ class StoryRunnerTest(unittest.TestCase):
     story_set.AddStory(blank_story)
     story_set.AddStory(green_story)
 
-    class Measurement(legacy_page_test.LegacyPageTest):
-      i = 0
-      def RunPage(self, page, _, results):
-        self.i += 1
-        results.AddValue(scalar.ScalarValue(
-            page, 'metric', 'unit', self.i,
-            improvement_direction=improvement_direction.UP))
-
-      def ValidateAndMeasurePage(self, page, tab, results):
-        pass
-
     self.options.pageset_repeat = 2
     self.options.output_formats = []
     results = results_options.CreateResults(
         EmptyMetadataForTest(), self.options)
-    story_runner.Run(
-        Measurement(), story_set, self.options, results)
+    story_runner.Run(_Measurement(), story_set, self.options, results)
 
     summary = summary_module.Summary(results.all_page_specific_values)
     values = summary.interleaved_computed_per_page_values_and_summaries
@@ -657,6 +690,60 @@ class StoryRunnerTest(unittest.TestCase):
     self.assertIn(green_value, values)
     self.assertIn(merged_value, values)
 
+  def testRunStoryDisabledStory(self):
+    story_set = story_module.StorySet()
+    story_one = DummyLocalStory(TestSharedPageState, name='one')
+    story_set.AddStory(story_one)
+    results = results_options.CreateResults(
+        EmptyMetadataForTest(), self.options)
+
+    story_runner.Run(_Measurement(), story_set, self.options, results,
+                     expectations=_DisableStoryExpectations())
+    summary = summary_module.Summary(results.all_page_specific_values)
+    values = summary.interleaved_computed_per_page_values_and_summaries
+
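+    # A skipped run still counts as "successful" (see
+    # GetNumberOfSuccessfulPageRuns), so the disabled story appears in both
+    # counters but contributes no values.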
+    self.assertEquals(1, GetNumberOfSuccessfulPageRuns(results))
+    self.assertEquals(1, GetNumberOfSkippedPageRuns(results))
+    self.assertEquals(0, len(results.failures))
+    self.assertEquals(0, len(values))
+
+  def testRunStoryOneDisabledOneNot(self):
+    story_set = story_module.StorySet()
+    story_one = DummyLocalStory(TestSharedPageState, name='one')
+    story_two = DummyLocalStory(TestSharedPageState, name='two')
+    story_set.AddStory(story_one)
+    story_set.AddStory(story_two)
+    results = results_options.CreateResults(
+        EmptyMetadataForTest(), self.options)
+
+    story_runner.Run(_Measurement(), story_set, self.options, results,
+                     expectations=_DisableStoryExpectations())
+    summary = summary_module.Summary(results.all_page_specific_values)
+    values = summary.interleaved_computed_per_page_values_and_summaries
+
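+    # Only story 'two' actually runs: it contributes its page value plus the
+    # computed summary value, while story 'one' is skipped.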
+    self.assertEquals(2, GetNumberOfSuccessfulPageRuns(results))
+    self.assertEquals(1, GetNumberOfSkippedPageRuns(results))
+    self.assertEquals(0, len(results.failures))
+    self.assertEquals(2, len(values))
+
+  def testRunStoryDisabledOverriddenByFlag(self):
+    story_set = story_module.StorySet()
+    story_one = DummyLocalStory(TestSharedPageState, name='one')
+    story_set.AddStory(story_one)
+    self.options.run_disabled_tests = True
+    results = results_options.CreateResults(
+        EmptyMetadataForTest(), self.options)
+
+    story_runner.Run(_Measurement(), story_set, self.options, results,
+                     expectations=_DisableStoryExpectations())
+    summary = summary_module.Summary(results.all_page_specific_values)
+    values = summary.interleaved_computed_per_page_values_and_summaries
+
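+    # run_disabled_tests overrides the story-level expectation: nothing is
+    # skipped, and both the page value and its summary are emitted.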
+    self.assertEquals(1, GetNumberOfSuccessfulPageRuns(results))
+    self.assertEquals(0, GetNumberOfSkippedPageRuns(results))
+    self.assertEquals(0, len(results.failures))
+    self.assertEquals(2, len(values))
+
   @decorators.Disabled('chromeos')  # crbug.com/483212
   def testUpdateAndCheckArchives(self):
     usr_stub = system_stub.Override(story_runner, ['cloud_storage'])
@@ -1050,8 +1137,7 @@ class StoryRunnerTest(unittest.TestCase):
       mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results)
     ])
 
-  def testRunBenchmarkTimeDuration(self):
-    fake_benchmark = FakeBenchmark()
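+  # Factored out of testRunBenchmarkTimeDuration so the RunBenchmark tests
+  # below can share the same base options.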
+  def _GenerateBaseBrowserFinderOptions(self):
     options = fakes.CreateBrowserFinderOptions()
     options.upload_results = None
     options.suppress_gtest_report = False
@@ -1060,6 +1146,41 @@ class StoryRunnerTest(unittest.TestCase):
     options.max_failures = 100
     options.pageset_repeat = 1
     options.output_formats = ['chartjson']
+    options.run_disabled_tests = False
+    return options
+
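+  # A benchmark disabled via expectations still produces chartjson output,
+  # but with 'enabled' recorded as false.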
+  def testRunBenchmarkDisabledBenchmark(self):
+    fake_benchmark = FakeBenchmark()
+    fake_benchmark.Disabled = True
+    options = self._GenerateBaseBrowserFinderOptions()
+    tmp_path = tempfile.mkdtemp()
+    try:
+      options.output_dir = tmp_path
+      story_runner.RunBenchmark(fake_benchmark, options)
+      with open(os.path.join(tmp_path, 'results-chart.json')) as f:
+        data = json.load(f)
+      self.assertFalse(data['enabled'])
+    finally:
+      shutil.rmtree(tmp_path)
+
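+  # Unlike story-level disabling (see testRunStoryDisabledOverriddenByFlag),
+  # a benchmark disabled via expectations stays disabled even when
+  # run_disabled_tests is set.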
+  def testRunBenchmarkDisabledBenchmarkCannotBeOverriddenByCommandLine(self):
+    fake_benchmark = FakeBenchmark()
+    fake_benchmark.Disabled = True
+    options = self._GenerateBaseBrowserFinderOptions()
+    options.run_disabled_tests = True
+    temp_path = tempfile.mkdtemp()
+    try:
+      options.output_dir = temp_path
+      story_runner.RunBenchmark(fake_benchmark, options)
+      with open(os.path.join(temp_path, 'results-chart.json')) as f:
+        data = json.load(f)
+      self.assertFalse(data['enabled'])
+    finally:
+      shutil.rmtree(temp_path)
+
+  def testRunBenchmarkTimeDuration(self):
+    fake_benchmark = FakeBenchmark()
+    options = self._GenerateBaseBrowserFinderOptions()
     with mock.patch('telemetry.internal.story_runner.time.time') as time_patch:
       # 3, because telemetry code asks for the time at some point