OLD | NEW |
---|---|
1 # Copyright 2014 The Chromium Authors. All rights reserved. | 1 # Copyright 2014 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 import json | |
5 import math | 6 import math |
6 import os | 7 import os |
8 import shutil | |
7 import StringIO | 9 import StringIO |
8 import sys | 10 import sys |
11 import tempfile | |
9 import unittest | 12 import unittest |
10 | 13 |
11 from py_utils import cloud_storage # pylint: disable=import-error | 14 from py_utils import cloud_storage # pylint: disable=import-error |
12 | 15 |
13 from telemetry import benchmark | 16 from telemetry import benchmark |
17 from telemetry import page | |
14 from telemetry.core import exceptions | 18 from telemetry.core import exceptions |
15 from telemetry.core import util | 19 from telemetry.core import util |
16 from telemetry import decorators | 20 from telemetry import decorators |
17 from telemetry.internal.actions import page_action | 21 from telemetry.internal.actions import page_action |
18 from telemetry.internal.results import page_test_results | 22 from telemetry.internal.results import page_test_results |
19 from telemetry.internal.results import results_options | 23 from telemetry.internal.results import results_options |
20 from telemetry.internal import story_runner | 24 from telemetry.internal import story_runner |
21 from telemetry.internal.util import exception_formatter as ex_formatter_module | 25 from telemetry.internal.util import exception_formatter as ex_formatter_module |
22 from telemetry.page import page as page_module | 26 from telemetry.page import page as page_module |
23 from telemetry.page import legacy_page_test | 27 from telemetry.page import legacy_page_test |
24 from telemetry import story as story_module | 28 from telemetry import story as story_module |
29 from telemetry.testing import fakes | |
25 from telemetry.testing import options_for_unittests | 30 from telemetry.testing import options_for_unittests |
26 from telemetry.testing import system_stub | 31 from telemetry.testing import system_stub |
27 import mock | 32 import mock |
28 from telemetry.value import failure | 33 from telemetry.value import failure |
29 from telemetry.value import improvement_direction | 34 from telemetry.value import improvement_direction |
30 from telemetry.value import list_of_scalar_values | 35 from telemetry.value import list_of_scalar_values |
31 from telemetry.value import scalar | 36 from telemetry.value import scalar |
32 from telemetry.value import skip | 37 from telemetry.value import skip |
33 from telemetry.value import summary as summary_module | 38 from telemetry.value import summary as summary_module |
34 from telemetry.web_perf import story_test | 39 from telemetry.web_perf import story_test |
35 from telemetry.web_perf import timeline_based_measurement | 40 from telemetry.web_perf import timeline_based_measurement |
36 from telemetry.wpr import archive_info | 41 from telemetry.wpr import archive_info |
37 | 42 |
38 # This linter complains if we define classes nested inside functions. | 43 # This linter complains if we define classes nested inside functions. |
39 # pylint: disable=bad-super-call | 44 # pylint: disable=bad-super-call |
40 | 45 |
41 # pylint: disable=too-many-lines | 46 # pylint: disable=too-many-lines |
42 | 47 |
43 | |
class FakePlatform(object):
  """Minimal stand-in for a telemetry platform backend used by these tests."""

  def CanMonitorThermalThrottling(self):
    # The fake platform never supports thermal-throttling monitoring.
    return False

  def GetOSName(self):
    # Intentionally returns None; tests here do not rely on a concrete OS name.
    return None

  def WaitForTemperature(self, _):
    # No-op: there is no real device whose temperature could be waited on.
    return None

  def GetDeviceTypeName(self):
    # Fixed marker string so tests can recognize the fake device type.
    return "GetDeviceTypeName"
56 | 60 |
57 | |
58 class TestSharedState(story_module.SharedState): | 61 class TestSharedState(story_module.SharedState): |
59 | 62 |
60 _platform = FakePlatform() | 63 _platform = FakePlatform() |
61 | 64 |
62 @classmethod | 65 @classmethod |
63 def SetTestPlatform(cls, platform): | 66 def SetTestPlatform(cls, platform): |
64 cls._platform = platform | 67 cls._platform = platform |
65 | 68 |
66 def __init__(self, test, options, story_set): | 69 def __init__(self, test, options, story_set): |
67 super(TestSharedState, self).__init__( | 70 super(TestSharedState, self).__init__( |
(...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
def SetupStorySet(allow_multiple_story_states, story_state_list):
  """Build a story set with one DummyLocalStory per entry in the state list.

  Args:
    allow_multiple_story_states: if true, use MixedStateStorySet so that
        stories in the set may use different shared-state classes.
    story_state_list: iterable of shared-state classes, one per story.

  Returns:
    The populated story set; stories are named 'story0', 'story1', ...
  """
  story_set = (MixedStateStorySet() if allow_multiple_story_states
               else story_module.StorySet())
  for index, story_state in enumerate(story_state_list):
    story_set.AddStory(
        DummyLocalStory(story_state, name='story%d' % index))
  return story_set
153 | 156 |
class FakeBenchmark(benchmark.Benchmark):
  """Minimal benchmark whose story set is intentionally empty.

  Used by tests (such as the benchmark-duration test) that only need
  RunBenchmark to perform its bookkeeping — e.g. recording the
  'BenchmarkDuration' chart — without actually running any story.
  """

  @classmethod
  def Name(cls):
    return 'fake'

  test = DummyTest

  def page_set(self):
    # Return an empty story set on purpose: callers assert on
    # benchmark-level output only (exactly one chart), so no story must
    # run.  The previous version also constructed a page.Page here but
    # never added it to the set, leaving it as dead code — removed.
    return story_module.StorySet()
171 | |
154 | 172 |
def _GetOptionForUnittest():
  """Return a fully-processed copy of the standard unittest options.

  The copy is run through story_runner's command-line plumbing so that all
  of its flag defaults are populated, with output formats disabled
  ('none') and gtest-style reporting left enabled.
  """
  opts = options_for_unittests.GetCopy()
  opts.output_formats = ['none']
  opts.suppress_gtest_report = False
  arg_parser = opts.CreateParser()
  story_runner.AddCommandLineArgs(arg_parser)
  opts.MergeDefaultValues(arg_parser.get_default_values())
  story_runner.ProcessCommandLineArgs(arg_parser, opts)
  return opts
(...skipping 865 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1029 mock.call.test.WillRunStory(root_mock.state.platform), | 1047 mock.call.test.WillRunStory(root_mock.state.platform), |
1030 mock.call.state.WillRunStory(root_mock.story), | 1048 mock.call.state.WillRunStory(root_mock.story), |
1031 mock.call.state.CanRunStory(root_mock.story), | 1049 mock.call.state.CanRunStory(root_mock.story), |
1032 mock.call.state.RunStory(root_mock.results), | 1050 mock.call.state.RunStory(root_mock.results), |
1033 mock.call.test.Measure(root_mock.state.platform, root_mock.results), | 1051 mock.call.test.Measure(root_mock.state.platform, root_mock.results), |
1034 mock.call.state.DumpStateUponFailure(root_mock.story, root_mock.results), | 1052 mock.call.state.DumpStateUponFailure(root_mock.story, root_mock.results), |
1035 mock.call.results.AddValue(FailureValueMatcher('foo')), | 1053 mock.call.results.AddValue(FailureValueMatcher('foo')), |
1036 mock.call.state.DidRunStory(root_mock.results), | 1054 mock.call.state.DidRunStory(root_mock.results), |
1037 mock.call.test.DidRunStory(root_mock.state.platform) | 1055 mock.call.test.DidRunStory(root_mock.state.platform) |
1038 ]) | 1056 ]) |
1057 | |
1058 def testRunBenchmarkTimeDuration(self): | |
1059 fake_benchmark = FakeBenchmark() | |
1060 options = fakes._FakeBrowserFinderOptions() | |
nednguyen
2017/03/23 23:26:52
Use options = fakes.CreateBrowserFinderOptions() i
martiniss
2017/03/24 17:28:36
Done.
| |
1061 options.upload_results = None | |
1062 options.suppress_gtest_report = False | |
1063 options.results_label = None | |
1064 options.use_live_sites = False | |
1065 options.max_failures = 100 | |
1066 options.pageset_repeat = 1 | |
1067 options.output_formats = ['chartjson'] | |
1068 | |
1069 with mock.patch('telemetry.internal.story_runner.time.time') as time_patch: | |
1070 # 3, because telemetry code asks for the time at some point | |
1071 time_patch.side_effect = [1, 0, 61] | |
1072 tmp_path = tempfile.mkdtemp() | |
1073 | |
1074 try: | |
1075 options.output_dir = tmp_path | |
1076 story_runner.RunBenchmark(fake_benchmark, options) | |
1077 with open(os.path.join(tmp_path, 'results-chart.json')) as f: | |
1078 data = json.load(f) | |
1079 | |
1080 self.assertEqual(len(data['charts']), 1) | |
1081 charts = data['charts'] | |
1082 self.assertIn('BenchmarkDuration', charts) | |
1083 duration = charts['BenchmarkDuration'] | |
1084 self.assertIn("summary", duration) | |
1085 summary = duration['summary'] | |
1086 duration = summary['value'] | |
1087 self.assertTrue(abs(duration - 1) < 0.001) | |
nednguyen
2017/03/23 23:26:52
nits: self.assertAlmostEqual(duration, 1)
martiniss
2017/03/24 17:28:36
Done.
| |
1088 finally: | |
1089 shutil.rmtree(tmp_path) | |
OLD | NEW |