Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 # Copyright 2014 The Chromium Authors. All rights reserved. | 1 # Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 """Run the first page of every benchmark that has a composable measurement. | 5 """For the benchmarks that set options, test that the options are valid.""" |
| 6 | |
| 7 Ideally this test would be comprehensive, but the above serves as a | |
| 8 kind of smoke test. | |
| 9 """ | |
| 10 | 6 |
| 11 import os | 7 import os |
| 12 import unittest | 8 import unittest |
| 13 | 9 |
| 14 from telemetry import benchmark as benchmark_module | 10 from telemetry import benchmark as benchmark_module |
| | 11 from telemetry.core import browser_options |
| 15 from telemetry.core import discover | 12 from telemetry.core import discover |
| 16 from telemetry.page import page_test | |
| 17 from telemetry.unittest import options_for_unittests | |
| 18 from telemetry.unittest import progress_reporter | 13 from telemetry.unittest import progress_reporter |
| | 14 from telemetry.util import path |
| 19 | 15 |
| 20 | 16 |
| 21 def SmokeTestGenerator(benchmark): | 17 def _GetPerfDir(*subdirs): |
| 22 # NOTE TO SHERIFFS: DO NOT DISABLE THIS TEST. | 18 return os.path.join(path.GetChromiumSrcDir(), 'tools', 'perf', *subdirs) |

> **dtu** (2014/10/14 23:14:41): I prefer having this defined relative to `__file__`.
>
> **slamm** (2014/10/14 23:23:55): Done.

*(A `__file__`-relative sketch of this helper appears after the diff.)*

| OLD | NEW |
|---|---|
| 23 # | |
| 24 # This smoke test dynamically tests all benchmarks. So disabling it for one | |
| 25 # failing or flaky benchmark would disable a much wider swath of coverage | |
| 26 # than is usually intended. Instead, if a particular benchmark is failing, | |
| 27 # disable it in tools/perf/benchmarks/*. | |
| 28 @benchmark_module.Disabled('chromeos') # crbug.com/351114 | |
| 29 def BenchmarkSmokeTest(self): | |
| 30 # Only measure a single page so that this test cycles reasonably quickly. | |
| 31 benchmark.options['pageset_repeat'] = 1 | |
| 32 benchmark.options['page_repeat'] = 1 | |
| 33 | 19 |
| 34 class SinglePageBenchmark(benchmark): # pylint: disable=W0232 | |
| 35 def CreatePageSet(self, options): | |
| 36 # pylint: disable=E1002 | |
| 37 ps = super(SinglePageBenchmark, self).CreatePageSet(options) | |
| 38 for p in ps.pages: | |
| 39 p.skip_waits = True | |
| 40 ps.user_stories = [p] | |
| 41 break | |
| 42 return ps | |
| 43 | 20 |
| 44 # Set the benchmark's default arguments. | 21 def _BenchmarkOptionsTestGenerator(benchmark): |
| 45 options = options_for_unittests.GetCopy() | 22 def testBenchmarkOptions(self): # pylint: disable=W0613 |
| 46 options.output_format = 'none' | 23 """Invalid options will raise benchmark.InvalidOptionsError.""" |
| 47 options.suppress_gtest_report = True | 24 options = browser_options.BrowserFinderOptions() |
| 48 parser = options.CreateParser() | 25 parser = options.CreateParser() |
| 49 | |
| 50 benchmark.AddCommandLineArgs(parser) | 26 benchmark.AddCommandLineArgs(parser) |
| 51 benchmark_module.AddCommandLineArgs(parser) | 27 benchmark_module.AddCommandLineArgs(parser) |
| 52 benchmark.SetArgumentDefaults(parser) | 28 benchmark.SetArgumentDefaults(parser) |
| 53 options.MergeDefaultValues(parser.get_default_values()) | 29 return testBenchmarkOptions |
| 54 | 30 |
| 55 benchmark.ProcessCommandLineArgs(None, options) | |
| 56 benchmark_module.ProcessCommandLineArgs(None, options) | |
| 57 | 31 |
| 58 self.assertEqual(0, SinglePageBenchmark().Run(options), | 32 def _AddBenchmarkOptionsTests(suite): |
| 59 msg='Failed: %s' % benchmark) | 33 all_benchmarks = discover.DiscoverClasses( |
| 60 | 34 _GetPerfDir('benchmarks'), _GetPerfDir(), benchmark_module.Benchmark, |
| 61 return BenchmarkSmokeTest | 35 index_by_class_name=True).values() |
| | 36 for benchmark in all_benchmarks: |
| | 37 if not benchmark.options: |
| | 38 # No need to test benchmarks that have not defined options. |
| | 39 continue |
| | 40 class BenchmarkOptionsTest(unittest.TestCase): |
| | 41 pass |
| | 42 setattr(BenchmarkOptionsTest, benchmark.Name(), |
| | 43 _BenchmarkOptionsTestGenerator(benchmark)) |
| | 44 suite.addTest(BenchmarkOptionsTest(benchmark.Name())) |
| 62 | 45 |
| 63 | 46 |
| 64 def load_tests(_, _2, _3): | 47 def load_tests(_, _2, _3): |
| 65 suite = progress_reporter.TestSuite() | 48 suite = progress_reporter.TestSuite() |
| 66 | 49 _AddBenchmarkOptionsTests(suite) |
| 67 benchmarks_dir = os.path.dirname(__file__) | |
| 68 top_level_dir = os.path.dirname(benchmarks_dir) | |
| 69 measurements_dir = os.path.join(top_level_dir, 'measurements') | |
| 70 | |
| 71 all_measurements = discover.DiscoverClasses( | |
| 72 measurements_dir, top_level_dir, page_test.PageTest, | |
| 73 pattern='*.py').values() | |
| 74 all_benchmarks = discover.DiscoverClasses( | |
| 75 benchmarks_dir, top_level_dir, benchmark_module.Benchmark, | |
| 76 pattern='*.py').values() | |
| 77 for benchmark in all_benchmarks: | |
| 78 if benchmark.PageTestClass() not in all_measurements: | |
| 79 # If the benchmark is not in measurements, then it is not composable. | |
| 80 # Ideally we'd like to test these as well, but the non-composable | |
| 81 # benchmarks are usually long-running benchmarks. | |
| 82 continue | |
| 83 | |
| 84 # TODO(tonyg): Smoke doesn't work with session_restore yet. | |
| 85 if benchmark.Name().startswith('session_restore'): | |
| 86 continue | |
| 87 | |
| 88 if hasattr(benchmark, 'generated_profile_archive'): | |
| 89 # We'd like to test these, but don't know how yet. | |
| 90 continue | |
| 91 | |
| 92 class BenchmarkSmokeTest(unittest.TestCase): | |
| 93 pass | |
| 94 setattr(BenchmarkSmokeTest, benchmark.Name(), SmokeTestGenerator(benchmark)) | |
| 95 suite.addTest(BenchmarkSmokeTest(benchmark.Name())) | |
| 96 | |
| 97 return suite | 50 return suite |
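
The `_GetPerfDir` helper in the NEW column resolves `tools/perf` through `path.GetChromiumSrcDir()`; dtu's comment asks for a `__file__`-relative definition instead. A minimal, hypothetical take on that suggestion (not the code that landed), assuming this module lives in `tools/perf/benchmarks/` as the discovery calls imply:

```python
import os


def _GetPerfDir(*subdirs):
  # Hypothetical __file__-relative variant of the helper in the diff.
  # Assumes this module sits in tools/perf/benchmarks/, so the parent of
  # its directory is tools/perf.
  perf_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
  return os.path.join(perf_dir, *subdirs)
```

Either spelling yields the same directories for `DiscoverClasses`; the `__file__`-relative form would just avoid a dependency on the checkout-root helper.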
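The shape of the NEW test code — a generator function that returns a test method, `setattr` to attach it under the benchmark's name, and the `load_tests` protocol to hand unittest the assembled suite — works with plain `unittest` as well. A self-contained sketch of the same pattern, where `_ITEMS` and `_MakeTest` are stand-in names of mine rather than anything from Telemetry:

```python
import unittest

# Stand-ins for the benchmark classes that DiscoverClasses() returns in
# the real test.
_ITEMS = ['sunspider', 'octane', 'dromaeo']


def _MakeTest(name):
  # The closure captures `name`, so each generated method checks one item.
  def testOptions(self):
    self.assertTrue(name)  # The real test would validate benchmark options.
  return testOptions


def load_tests(loader, standard_tests, pattern):
  # unittest calls a module-level load_tests() instead of its default
  # discovery, so we return a suite with one generated case per item.
  suite = unittest.TestSuite()
  for name in _ITEMS:
    # A fresh TestCase subclass per item, mirroring the loop in the diff,
    # so each case carries only its own method.
    class OptionsTest(unittest.TestCase):
      pass
    setattr(OptionsTest, name, _MakeTest(name))
    # TestCase(method_name) builds an instance that runs just that method.
    suite.addTest(OptionsTest(name))
  return suite


if __name__ == '__main__':
  unittest.main()
```

Running this prints one result per item, each reported under the benchmark-style name it was registered with.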
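The generated test's docstring promises that invalid options raise `benchmark.InvalidOptionsError`, but the diff does not show the validation itself. One plausible mechanism — my assumption, not necessarily Telemetry's implementation — is to treat the parser's registered defaults as the set of legal option names when `SetArgumentDefaults` applies a benchmark's `options` dict:

```python
import optparse


class InvalidOptionsError(Exception):
  """Raised when a benchmark declares an option the parser doesn't know."""


def SetArgumentDefaults(parser, benchmark_options):
  # Hypothetical validation: every key a benchmark sets must already be a
  # registered option destination on the parser.
  unknown = set(benchmark_options) - set(parser.defaults)
  if unknown:
    raise InvalidOptionsError(
        'Invalid benchmark options: %s' % ', '.join(sorted(unknown)))
  parser.set_defaults(**benchmark_options)


parser = optparse.OptionParser()
parser.add_option('--pageset-repeat', dest='pageset_repeat', default=10)
SetArgumentDefaults(parser, {'pageset_repeat': 1})     # accepted
# SetArgumentDefaults(parser, {'pageset_repeats': 1})  # would raise
```

Under that assumption, simply wiring up `AddCommandLineArgs` and `SetArgumentDefaults`, as `testBenchmarkOptions` does, is enough to surface a bad `options` dict.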