Chromium Code Reviews

Unified Diff: tools/telemetry/telemetry/page/page_runner_unittest.py

Issue 752883002: Revert of [Telemetry] Introduce shared_user_story_state for real. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 6 years, 1 month ago
Index: tools/telemetry/telemetry/page/page_runner_unittest.py
diff --git a/tools/telemetry/telemetry/page/page_runner_unittest.py b/tools/telemetry/telemetry/page/page_runner_unittest.py
index 8d739e78bfec01355dc4d38a68afc4ca1e5d8027..51d540a6bce475d29f17f7989ca39230413adc43 100644
--- a/tools/telemetry/telemetry/page/page_runner_unittest.py
+++ b/tools/telemetry/telemetry/page/page_runner_unittest.py
@@ -17,13 +17,13 @@
from telemetry.core import user_agent
from telemetry.core import util
from telemetry.page import page as page_module
+from telemetry.page import page_runner
from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.page import test_expectations
from telemetry.results import results_options
from telemetry.unittest_util import options_for_unittests
from telemetry.unittest_util import system_stub
-from telemetry.user_story import user_story_runner
from telemetry.util import exception_formatter as exception_formatter_module
from telemetry.value import scalar
from telemetry.value import string
@@ -44,11 +44,11 @@
pass
-def SetUpUserStoryRunnerArguments(options):
+def SetUpPageRunnerArguments(options):
parser = options.CreateParser()
- user_story_runner.AddCommandLineArgs(parser)
+ page_runner.AddCommandLineArgs(parser)
options.MergeDefaultValues(parser.get_default_values())
- user_story_runner.ProcessCommandLineArgs(parser, options)
+ page_runner.ProcessCommandLineArgs(parser, options)
class EmptyMetadataForTest(benchmark.BenchmarkMetadata):
def __init__(self):
@@ -83,25 +83,23 @@
pass
-# TODO: remove test cases that use real browsers and replace with a
-# user_story_runner or shared_page_state unittest that tests the same logic.
class PageRunnerTests(unittest.TestCase):
# TODO(nduca): Move the basic "test failed, test succeeded" tests from
# page_test_unittest to here.
def setUp(self):
- self._user_story_runner_logging_stub = None
+ self._page_runner_logging_stub = None
def SuppressExceptionFormatting(self):
- user_story_runner.exception_formatter = FakeExceptionFormatterModule
- self._user_story_runner_logging_stub = system_stub.Override(
- user_story_runner, ['logging'])
+ page_runner.exception_formatter = FakeExceptionFormatterModule
+ self._page_runner_logging_stub = system_stub.Override(
+ page_runner, ['logging'])
def RestoreExceptionFormatter(self):
- user_story_runner.exception_formatter = exception_formatter_module
- if self._user_story_runner_logging_stub:
- self._user_story_runner_logging_stub.Restore()
- self._user_story_runner_logging_stub = None
+ page_runner.exception_formatter = exception_formatter_module
+ if self._page_runner_logging_stub:
+ self._page_runner_logging_stub.Restore()
+ self._page_runner_logging_stub = None
def tearDown(self):
self.RestoreExceptionFormatter()
@@ -120,9 +118,9 @@
options = options_for_unittests.GetCopy()
options.output_formats = ['none']
options.suppress_gtest_report = True
- SetUpUserStoryRunnerArguments(options)
- results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(Test(), ps, expectations, options, results)
+ SetUpPageRunnerArguments(options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(Test(), ps, expectations, options, results)
self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
self.assertEquals(1, len(results.failures))
@@ -152,9 +150,9 @@
options.output_formats = ['none']
options.suppress_gtest_report = True
test = Test()
- SetUpUserStoryRunnerArguments(options)
- results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(test, ps, expectations, options, results)
+ SetUpPageRunnerArguments(options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(test, ps, expectations, options, results)
self.assertEquals(2, test.run_count)
self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
self.assertEquals(1, len(results.failures))
@@ -182,9 +180,9 @@
options.output_formats = ['none']
options.suppress_gtest_report = True
test = Test()
- SetUpUserStoryRunnerArguments(options)
- results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(test, ps, expectations, options, results)
+ SetUpPageRunnerArguments(options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(test, ps, expectations, options, results)
self.assertEquals(2, test.run_count)
self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
self.assertEquals(1, len(results.failures))
@@ -213,9 +211,9 @@
options.output_formats = ['none']
options.suppress_gtest_report = True
test = Test()
- SetUpUserStoryRunnerArguments(options)
- results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(test, ps, expectations, options, results)
+ SetUpPageRunnerArguments(options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(test, ps, expectations, options, results)
self.assertEquals(2, test.run_count)
self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
self.assertEquals(1, len(results.failures))
@@ -231,9 +229,9 @@
options = options_for_unittests.GetCopy()
options.output_formats = ['none']
options.suppress_gtest_report = True
- SetUpUserStoryRunnerArguments(options)
- results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(DummyTest(), ps, expectations, options, results)
+ SetUpPageRunnerArguments(options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(DummyTest(), ps, expectations, options, results)
self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
@@ -263,27 +261,27 @@
options.page_repeat = 1
options.pageset_repeat = 1
- SetUpUserStoryRunnerArguments(options)
- results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(Measurement(), ps, expectations, options, results)
+ SetUpPageRunnerArguments(options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(Measurement(), ps, expectations, options, results)
self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
self.assertEquals(0, len(results.all_page_specific_values))
options.page_repeat = 1
options.pageset_repeat = 2
- SetUpUserStoryRunnerArguments(options)
- results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(Measurement(), ps, expectations, options, results)
+ SetUpPageRunnerArguments(options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(Measurement(), ps, expectations, options, results)
self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
self.assertEquals(2, len(results.all_page_specific_values))
options.page_repeat = 2
options.pageset_repeat = 1
- SetUpUserStoryRunnerArguments(options)
- results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(Measurement(), ps, expectations, options, results)
+ SetUpPageRunnerArguments(options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(Measurement(), ps, expectations, options, results)
self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
self.assertEquals(2, len(results.all_page_specific_values))
@@ -292,9 +290,9 @@
options.suppress_gtest_report = True
options.page_repeat = 1
options.pageset_repeat = 1
- SetUpUserStoryRunnerArguments(options)
- results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(Measurement(), ps, expectations, options, results)
+ SetUpPageRunnerArguments(options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(Measurement(), ps, expectations, options, results)
self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
self.assertEquals(0, len(results.all_page_specific_values))
@@ -323,14 +321,14 @@
options.results_label = None
options.page_repeat = 1
options.pageset_repeat = 2
- SetUpUserStoryRunnerArguments(options)
+ SetUpPageRunnerArguments(options)
output = StringIO.StringIO()
real_stdout = sys.stdout
sys.stdout = output
try:
results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(Measurement(), ps, expectations, options, results)
+ page_runner.Run(Measurement(), ps, expectations, options, results)
results.PrintSummary()
contents = output.getvalue()
self.assertEquals(4, len(GetSuccessfulPageRuns(results)))
@@ -386,9 +384,9 @@
options = options_for_unittests.GetCopy()
options.output_formats = ['none']
options.suppress_gtest_report = True
- SetUpUserStoryRunnerArguments(options)
+ SetUpPageRunnerArguments(options)
results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(test, ps, expectations, options, results)
+ page_runner.Run(test, ps, expectations, options, results)
finally:
os.remove(f.name)
@@ -417,13 +415,13 @@
options = options_for_unittests.GetCopy()
options.output_formats = ['none']
options.suppress_gtest_report = True
- SetUpUserStoryRunnerArguments(options)
- results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(test, ps, expectations, options, results)
+ SetUpPageRunnerArguments(options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(test, ps, expectations, options, results)
self.assertTrue(hasattr(test, 'hasRun') and test.hasRun)
- # Ensure that user_story_runner forces exactly 1 tab before running a page.
+ # Ensure that page_runner forces exactly 1 tab before running a page.
@decorators.Enabled('has tabs')
def testOneTab(self):
ps = page_set.PageSet()
@@ -448,12 +446,12 @@
options = options_for_unittests.GetCopy()
options.output_formats = ['none']
options.suppress_gtest_report = True
- SetUpUserStoryRunnerArguments(options)
- results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(test, ps, expectations, options, results)
-
- # Ensure that user_story_runner allows the test to customize the browser
- # before it launches.
+ SetUpPageRunnerArguments(options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(test, ps, expectations, options, results)
+
+ # Ensure that page_runner allows the test to customize the browser before it
+ # launches.
def testBrowserBeforeLaunch(self):
ps = page_set.PageSet()
expectations = test_expectations.TestExpectations()
@@ -482,9 +480,9 @@
options = options_for_unittests.GetCopy()
options.output_formats = ['none']
options.suppress_gtest_report = True
- SetUpUserStoryRunnerArguments(options)
- results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(test, ps, expectations, options, results)
+ SetUpPageRunnerArguments(options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(test, ps, expectations, options, results)
def testRunPageWithStartupUrl(self):
ps = page_set.PageSet()
@@ -514,13 +512,13 @@
if not browser_finder.FindBrowser(options):
return
test = Measurement()
- SetUpUserStoryRunnerArguments(options)
- results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(test, ps, expectations, options, results)
+ SetUpPageRunnerArguments(options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(test, ps, expectations, options, results)
self.assertEquals('about:blank', options.browser_options.startup_url)
self.assertTrue(test.browser_restarted)
- # Ensure that user_story_runner calls cleanUp when a page run fails.
+ # Ensure that page_runner calls cleanUp when a page run fails.
def testCleanUpPage(self):
ps = page_set.PageSet()
expectations = test_expectations.TestExpectations()
@@ -544,9 +542,9 @@
options = options_for_unittests.GetCopy()
options.output_formats = ['none']
options.suppress_gtest_report = True
- SetUpUserStoryRunnerArguments(options)
- results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(test, ps, expectations, options, results)
+ SetUpPageRunnerArguments(options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(test, ps, expectations, options, results)
assert test.did_call_clean_up
# Ensure skipping the test if page cannot be run on the browser
@@ -582,9 +580,9 @@
options = options_for_unittests.GetCopy()
options.output_formats = ['none']
options.suppress_gtest_report = True
- SetUpUserStoryRunnerArguments(options)
- results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(test, ps, expectations, options, results)
+ SetUpPageRunnerArguments(options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(test, ps, expectations, options, results)
self.assertFalse(test.will_navigate_to_page_called)
self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
@@ -617,9 +615,9 @@
if not max_failures is None:
options.max_failures = max_failures
expected_max_failures = max_failures
- SetUpUserStoryRunnerArguments(options)
- results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(Test(max_failures=2),
+ SetUpPageRunnerArguments(options)
+ results = results_options.CreateResults(EmptyMetadataForTest(), options)
+ page_runner.Run(Test(max_failures=2),
ps, expectations, options, results)
self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
# Runs up to max_failures+1 failing tests before stopping, since
@@ -656,9 +654,9 @@
options.output_dir = tempfile.mkdtemp()
options.profiler = 'trace'
try:
- SetUpUserStoryRunnerArguments(options)
+ SetUpPageRunnerArguments(options)
results = results_options.CreateResults(EmptyMetadataForTest(), options)
- user_story_runner.Run(Measurement(), ps, expectations, options, results)
+ page_runner.Run(Measurement(), ps, expectations, options, results)
self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
self.assertEquals(0, len(results.failures))
self.assertEquals(0, len(results.all_page_specific_values))
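
Taken together, the hunks above restore one setup pattern that every test in this file repeats after the revert. The sketch below consolidates that pattern for readability; it is illustrative only and not part of the patch. The imports and calls (page_runner.AddCommandLineArgs, page_runner.ProcessCommandLineArgs, page_runner.Run, results_options.CreateResults, options_for_unittests.GetCopy) are taken directly from the diff, EmptyMetadataForTest is the helper class defined earlier in this file, and the RunTestOnPageSet helper name is hypothetical.

from telemetry.page import page_runner
from telemetry.results import results_options
from telemetry.unittest_util import options_for_unittests

def SetUpPageRunnerArguments(options):
  # Register page_runner's flags on a fresh parser, merge the parser's
  # defaults back into the options object, then let page_runner validate
  # the final values (mirrors the helper restored by this patch).
  parser = options.CreateParser()
  page_runner.AddCommandLineArgs(parser)
  options.MergeDefaultValues(parser.get_default_values())
  page_runner.ProcessCommandLineArgs(parser, options)

def RunTestOnPageSet(test, ps, expectations):
  # Hypothetical consolidation of the per-test boilerplate: quiet output,
  # processed arguments, a fresh results object, one page_runner.Run() call.
  options = options_for_unittests.GetCopy()
  options.output_formats = ['none']
  options.suppress_gtest_report = True
  SetUpPageRunnerArguments(options)
  results = results_options.CreateResults(EmptyMetadataForTest(), options)
  page_runner.Run(test, ps, expectations, options, results)
  return results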
