Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(311)

Unified Diff: telemetry/telemetry/internal/story_runner_unittest.py

Issue 2913383005: [Telemetry] Add temporary disabling of benchmark to story expectations. (Closed)
Patch Set: Created 3 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « telemetry/telemetry/internal/story_runner.py ('k') | telemetry/telemetry/story/expectations.py » ('j') | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
Index: telemetry/telemetry/internal/story_runner_unittest.py
diff --git a/telemetry/telemetry/internal/story_runner_unittest.py b/telemetry/telemetry/internal/story_runner_unittest.py
index ec3f9e909296033310716cc6279f42ce7b36b904..bda23ac1155afcb466a7dca76a12009226bbd51a 100644
--- a/telemetry/telemetry/internal/story_runner_unittest.py
+++ b/telemetry/telemetry/internal/story_runner_unittest.py
@@ -153,12 +153,21 @@ def SetupStorySet(allow_multiple_story_states, story_state_list):
name='story%d' % i))
return story_set
-class _DisableBenchmarkExpectations(
+
+class _PermanentlyDisableBenchmarkExpectations(
story_module.expectations.StoryExpectations):
def SetExpectations(self):
self.PermanentlyDisableBenchmark(
[story_module.expectations.ALL], 'crbug.com/123')
+
+class _TemporarilyDisableBenchmarkExpectations(
+ story_module.expectations.StoryExpectations):
+ def SetExpectations(self):
+ self.TemporarilyDisableBenchmark(
+ [story_module.expectations.ALL], 'crbug.com/123')
+
+
class _DisableStoryExpectations(story_module.expectations.StoryExpectations):
def SetExpectations(self):
self.DisableStory('one', [story_module.expectations.ALL], 'crbug.com/123')
@@ -167,7 +176,8 @@ class _DisableStoryExpectations(story_module.expectations.StoryExpectations):
class FakeBenchmark(benchmark.Benchmark):
def __init__(self):
super(FakeBenchmark, self).__init__()
- self._disabled = False
+ self._permanently_disabled = False
+ self._temporarily_disabled = False
self._story_disabled = False
@classmethod
@@ -180,13 +190,22 @@ class FakeBenchmark(benchmark.Benchmark):
return story_module.StorySet()
@property
- def disabled(self):
- return self._disabled
+ def temporarily_disabled(self):
+ return self._temporarily_disabled
- @disabled.setter
- def disabled(self, b):
+ @temporarily_disabled.setter
+ def temporarily_disabled(self, b):
assert isinstance(b, bool)
- self._disabled = b
+ self._temporarily_disabled = b
+
+ @property
+ def permanently_disabled(self):
+ return self._permanently_disabled
+
+ @permanently_disabled.setter
+ def permanently_disabled(self, b):
+ assert isinstance(b, bool)
+ self._permanently_disabled = b
@property
def story_disabled(self):
@@ -200,8 +219,10 @@ class FakeBenchmark(benchmark.Benchmark):
def GetExpectations(self):
if self.story_disabled:
return _DisableStoryExpectations()
- if self.disabled:
- return _DisableBenchmarkExpectations()
+ if self.permanently_disabled:
+ return _PermanentlyDisableBenchmarkExpectations()
+ if self.temporarily_disabled:
+ return _TemporarilyDisableBenchmarkExpectations()
return story_module.expectations.StoryExpectations()
@@ -1161,9 +1182,44 @@ class StoryRunnerTest(unittest.TestCase):
options.run_disabled_tests = False
return options
- def testRunBenchmarkDisabledBenchmark(self):
+ def testRunBenchmarkTemporarilyDisabledBenchmark(self):
+ fake_benchmark = FakeBenchmark()
+ fake_benchmark.temporarily_disabled = True
+ options = self._GenerateBaseBrowserFinderOptions()
+ tmp_path = tempfile.mkdtemp()
+ try:
+ options.output_dir = tmp_path
+ story_runner.RunBenchmark(fake_benchmark, options)
+ with open(os.path.join(tmp_path, 'results-chart.json')) as f:
+ data = json.load(f)
+ self.assertFalse(data['enabled'])
+ finally:
+ shutil.rmtree(tmp_path)
+
+ def testRunBenchmarkTemporarilyDisabledBenchmarkCanBeOverriddenByCommandLine(
+ self):
fake_benchmark = FakeBenchmark()
- fake_benchmark.disabled = True
+ fake_benchmark.temporarily_disabled = True
+ options = self._GenerateBaseBrowserFinderOptions()
+ options.run_disabled_tests = True
+ temp_path = tempfile.mkdtemp()
+ try:
+ options.output_dir = temp_path
+ story_runner.RunBenchmark(fake_benchmark, options)
+ with open(os.path.join(temp_path, 'results-chart.json')) as f:
+ data = json.load(f)
+
+ self.assertEqual(len(data['charts']), 1)
+ charts = data['charts']
+ self.assertIn('benchmark_duration', charts)
+ duration = charts['benchmark_duration']
+ self.assertIn("summary", duration)
+ finally:
+ shutil.rmtree(temp_path)
+
+ def testRunBenchmarkPermanentlyDisabledBenchmark(self):
+ fake_benchmark = FakeBenchmark()
+ fake_benchmark.permanently_disabled = True
options = self._GenerateBaseBrowserFinderOptions()
tmp_path = tempfile.mkdtemp()
try:
@@ -1175,9 +1231,10 @@ class StoryRunnerTest(unittest.TestCase):
finally:
shutil.rmtree(tmp_path)
- def testRunBenchmarkDisabledBenchmarkCannotOverriddenByCommandLine(self):
+ def testRunBenchmarkPermanentlyDisabledBenchmarkCannotOverriddenByCommandLine(
+ self):
fake_benchmark = FakeBenchmark()
- fake_benchmark.disabled = True
+ fake_benchmark.permanently_disabled = True
options = self._GenerateBaseBrowserFinderOptions()
options.run_disabled_tests = True
temp_path = tempfile.mkdtemp()
« no previous file with comments | « telemetry/telemetry/internal/story_runner.py ('k') | telemetry/telemetry/story/expectations.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698