| Index: tools/perf/benchmarks/benchmark_unittest.py |
| diff --git a/tools/perf/benchmarks/benchmark_unittest.py b/tools/perf/benchmarks/benchmark_unittest.py |
| index 476804ed536fc00f2d39ba93634411665ec7bc23..ff9af6cc667845ece3c705ce0b23411650eefc0b 100644 |
| --- a/tools/perf/benchmarks/benchmark_unittest.py |
| +++ b/tools/perf/benchmarks/benchmark_unittest.py |
| @@ -2,96 +2,49 @@ |
| # Use of this source code is governed by a BSD-style license that can be |
| # found in the LICENSE file. |
| -"""Run the first page of every benchmark that has a composable measurement. |
| - |
| -Ideally this test would be comprehensive, but the above serves as a |
| -kind of smoke test. |
| -""" |
| +"""For the benchmarks that set options, test that the options are valid.""" |
| import os |
| import unittest |
| from telemetry import benchmark as benchmark_module |
| +from telemetry.core import browser_options |
| from telemetry.core import discover |
| -from telemetry.page import page_test |
| -from telemetry.unittest import options_for_unittests |
| from telemetry.unittest import progress_reporter |
| +from telemetry.util import path |
| -def SmokeTestGenerator(benchmark): |
| - # NOTE TO SHERIFFS: DO NOT DISABLE THIS TEST. |
| - # |
| - # This smoke test dynamically tests all benchmarks. So disabling it for one |
| - # failing or flaky benchmark would disable a much wider swath of coverage |
| - # than is usually intended. Instead, if a particular benchmark is failing, |
| - # disable it in tools/perf/benchmarks/*. |
| - @benchmark_module.Disabled('chromeos') # crbug.com/351114 |
| - def BenchmarkSmokeTest(self): |
| - # Only measure a single page so that this test cycles reasonably quickly. |
| - benchmark.options['pageset_repeat'] = 1 |
| - benchmark.options['page_repeat'] = 1 |
| +def _GetPerfDir(*subdirs): |
| + return os.path.join(path.GetChromiumSrcDir(), 'tools', 'perf', *subdirs) |
|
dtu 2014/10/14 23:14:41: I prefer having this defined relative to __file__
slamm 2014/10/14 23:23:55: Done.
|
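As a side note, here is a minimal sketch of the alternative dtu suggests above: deriving the perf directory from this test module's own location rather than from path.GetChromiumSrcDir(). The helper below reuses the _GetPerfDir name for illustration only and is not the committed code.

import os

# Sketch only: benchmark_unittest.py lives in tools/perf/benchmarks/, so two
# dirname() calls on this file's absolute path yield the tools/perf root.
_PERF_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

def _GetPerfDir(*subdirs):
  return os.path.join(_PERF_DIR, *subdirs)

With this form, _GetPerfDir('benchmarks') and _GetPerfDir() resolve to the same directories the patch passes to discover.DiscoverClasses, without depending on telemetry.util.path.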
| - class SinglePageBenchmark(benchmark): # pylint: disable=W0232 |
| - def CreatePageSet(self, options): |
| - # pylint: disable=E1002 |
| - ps = super(SinglePageBenchmark, self).CreatePageSet(options) |
| - for p in ps.pages: |
| - p.skip_waits = True |
| - ps.user_stories = [p] |
| - break |
| - return ps |
| - # Set the benchmark's default arguments. |
| - options = options_for_unittests.GetCopy() |
| - options.output_format = 'none' |
| - options.suppress_gtest_report = True |
| +def _BenchmarkOptionsTestGenerator(benchmark): |
| + def testBenchmarkOptions(self): # pylint: disable=W0613 |
| + """Invalid options will raise benchmark.InvalidOptionsError.""" |
| + options = browser_options.BrowserFinderOptions() |
| parser = options.CreateParser() |
| - |
| benchmark.AddCommandLineArgs(parser) |
| benchmark_module.AddCommandLineArgs(parser) |
| benchmark.SetArgumentDefaults(parser) |
| - options.MergeDefaultValues(parser.get_default_values()) |
| - |
| - benchmark.ProcessCommandLineArgs(None, options) |
| - benchmark_module.ProcessCommandLineArgs(None, options) |
| - |
| - self.assertEqual(0, SinglePageBenchmark().Run(options), |
| - msg='Failed: %s' % benchmark) |
| - |
| - return BenchmarkSmokeTest |
| - |
| - |
| -def load_tests(_, _2, _3): |
| - suite = progress_reporter.TestSuite() |
| + return testBenchmarkOptions |
| - benchmarks_dir = os.path.dirname(__file__) |
| - top_level_dir = os.path.dirname(benchmarks_dir) |
| - measurements_dir = os.path.join(top_level_dir, 'measurements') |
| - all_measurements = discover.DiscoverClasses( |
| - measurements_dir, top_level_dir, page_test.PageTest, |
| - pattern='*.py').values() |
| +def _AddBenchmarkOptionsTests(suite): |
| all_benchmarks = discover.DiscoverClasses( |
| - benchmarks_dir, top_level_dir, benchmark_module.Benchmark, |
| - pattern='*.py').values() |
| + _GetPerfDir('benchmarks'), _GetPerfDir(), benchmark_module.Benchmark, |
| + index_by_class_name=True).values() |
| for benchmark in all_benchmarks: |
| - if benchmark.PageTestClass() not in all_measurements: |
| - # If the benchmark is not in measurements, then it is not composable. |
| - # Ideally we'd like to test these as well, but the non-composable |
| - # benchmarks are usually long-running benchmarks. |
| + if not benchmark.options: |
| + # No need to test benchmarks that have not defined options. |
| continue |
| - |
| - # TODO(tonyg): Smoke doesn't work with session_restore yet. |
| - if benchmark.Name().startswith('session_restore'): |
| - continue |
| - |
| - if hasattr(benchmark, 'generated_profile_archive'): |
| - # We'd like to test these, but don't know how yet. |
| - continue |
| - |
| - class BenchmarkSmokeTest(unittest.TestCase): |
| + class BenchmarkOptionsTest(unittest.TestCase): |
| pass |
| - setattr(BenchmarkSmokeTest, benchmark.Name(), SmokeTestGenerator(benchmark)) |
| - suite.addTest(BenchmarkSmokeTest(benchmark.Name())) |
| + setattr(BenchmarkOptionsTest, benchmark.Name(), |
| + _BenchmarkOptionsTestGenerator(benchmark)) |
| + suite.addTest(BenchmarkOptionsTest(benchmark.Name())) |
| + |
| +def load_tests(_, _2, _3): |
| + suite = progress_reporter.TestSuite() |
| + _AddBenchmarkOptionsTests(suite) |
| return suite |
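For context on what the new per-benchmark test exercises, here is an illustrative benchmark-style class (hypothetical, not part of this CL) with an options dict of the kind the generated testBenchmarkOptions validates; per the test's docstring, an invalid entry raises benchmark.InvalidOptionsError.

from telemetry import benchmark as benchmark_module

class ExampleBenchmark(benchmark_module.Benchmark):  # Hypothetical example.
  # The generated test pushes these defaults through the option parser; per
  # the test docstring, a bad key (e.g. the typo 'pageset_repeats') would
  # raise benchmark.InvalidOptionsError and fail the generated test.
  options = {'pageset_repeat': 1, 'page_repeat': 1}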