| OLD | NEW |
| 1 # Copyright 2014 The Chromium Authors. All rights reserved. | 1 # Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 """Run the first page of one benchmark for every module. | 5 """Run the first page of one benchmark for every module. |
| 6 | 6 |
| 7 Only benchmarks that have a composable measurement are included. | 7 Only benchmarks that have a composable measurement are included. |
| 8 Ideally this test would be comprehensive, however, running one page | 8 Ideally this test would be comprehensive, however, running one page |
| 9 of every benchmark would run impractically long. | 9 of every benchmark would run impractically long. |
| 10 """ | 10 """ |
| 11 | 11 |
| 12 import os | 12 import os |
| 13 import unittest | 13 import unittest |
| 14 | 14 |
| 15 from telemetry import benchmark as benchmark_module | 15 from telemetry import benchmark as benchmark_module |
| 16 from telemetry.core import discover | 16 from telemetry.core import discover |
| 17 from telemetry.page import page_test | 17 from telemetry.page import page_test |
| 18 from telemetry.unittest import options_for_unittests | 18 from telemetry.unittest import options_for_unittests |
| 19 from telemetry.unittest import progress_reporter | 19 from telemetry.unittest import progress_reporter |
| 20 | 20 |
| 21 | 21 |
| 22 def SmokeTestGenerator(benchmark): | 22 def SmokeTestGenerator(benchmark_class): |
| 23 # NOTE TO SHERIFFS: DO NOT DISABLE THIS TEST. | 23 # NOTE TO SHERIFFS: DO NOT DISABLE THIS TEST. |
| 24 # | 24 # |
| 25 # This smoke test dynamically tests all benchmarks. So disabling it for one | 25 # This smoke test dynamically tests all benchmarks. So disabling it for one |
| 26 # failing or flaky benchmark would disable a much wider swath of coverage | 26 # failing or flaky benchmark would disable a much wider swath of coverage |
| 27 # than is usally intended. Instead, if a particular benchmark is failing, | 27 # than is usually intended. Instead, if a particular benchmark is failing, |
| 28 # disable it in tools/perf/benchmarks/*. | 28 # disable it in tools/perf/benchmarks/*. |
| 29 @benchmark_module.Disabled('chromeos') # crbug.com/351114 | 29 @benchmark_module.Disabled('chromeos') # crbug.com/351114 |
| 30 def BenchmarkSmokeTest(self): | 30 def BenchmarkSmokeTest(self): |
| 31 # Only measure a single page so that this test cycles reasonably quickly. | 31 # Only measure a single page so that this test cycles reasonably quickly. |
| 32 benchmark.options['pageset_repeat'] = 1 | 32 benchmark_class.options['pageset_repeat'] = 1 |
| 33 benchmark.options['page_repeat'] = 1 | 33 benchmark_class.options['page_repeat'] = 1 |
| 34 | 34 |
| 35 class SinglePageBenchmark(benchmark): # pylint: disable=W0232 | 35 class SinglePageBenchmark(benchmark_class): # pylint: disable=W0232 |
| 36 def CreatePageSet(self, options): | 36 def CreatePageSet(self, options): |
| 37 # pylint: disable=E1002 | 37 # pylint: disable=E1002 |
| 38 ps = super(SinglePageBenchmark, self).CreatePageSet(options) | 38 ps = super(SinglePageBenchmark, self).CreatePageSet(options) |
| 39 for p in ps.pages: | 39 for p in ps.pages: |
| 40 p.skip_waits = True | 40 p.skip_waits = True |
| 41 ps.user_stories = [p] | 41 ps.user_stories = [p] |
| 42 break | 42 break |
| 43 return ps | 43 return ps |
| 44 | 44 |
| 45 # Set the benchmark's default arguments. | 45 # Set the benchmark's default arguments. |
| 46 options = options_for_unittests.GetCopy() | 46 options = options_for_unittests.GetCopy() |
| 47 options.output_format = 'none' | 47 options.output_format = 'none' |
| 48 options.suppress_gtest_report = True | 48 options.suppress_gtest_report = True |
| 49 parser = options.CreateParser() | 49 parser = options.CreateParser() |
| 50 | 50 |
| 51 benchmark = SinglePageBenchmark() |
| 51 benchmark.AddCommandLineArgs(parser) | 52 benchmark.AddCommandLineArgs(parser) |
| 52 benchmark_module.AddCommandLineArgs(parser) | 53 benchmark_module.AddCommandLineArgs(parser) |
| 53 benchmark.SetArgumentDefaults(parser) | 54 benchmark.SetArgumentDefaults(parser) |
| 54 options.MergeDefaultValues(parser.get_default_values()) | 55 options.MergeDefaultValues(parser.get_default_values()) |
| 55 | 56 |
| 56 benchmark.ProcessCommandLineArgs(None, options) | 57 benchmark.ProcessCommandLineArgs(None, options) |
| 57 benchmark_module.ProcessCommandLineArgs(None, options) | 58 benchmark_module.ProcessCommandLineArgs(None, options) |
| 58 | 59 |
| 59 self.assertEqual(0, SinglePageBenchmark().Run(options), | 60 self.assertEqual(0, benchmark.Run(options), |
| 60 msg='Failed: %s' % benchmark) | 61 msg='Failed: %s' % benchmark) |
| 61 | 62 |
| 62 return BenchmarkSmokeTest | 63 return BenchmarkSmokeTest |
| 63 | 64 |
| 64 | 65 |
| 65 def load_tests(_, _2, _3): | 66 def load_tests(_, _2, _3): |
| 66 suite = progress_reporter.TestSuite() | 67 suite = progress_reporter.TestSuite() |
| 67 | 68 |
| 68 benchmarks_dir = os.path.dirname(__file__) | 69 benchmarks_dir = os.path.dirname(__file__) |
| 69 top_level_dir = os.path.dirname(benchmarks_dir) | 70 top_level_dir = os.path.dirname(benchmarks_dir) |
| 70 measurements_dir = os.path.join(top_level_dir, 'measurements') | 71 measurements_dir = os.path.join(top_level_dir, 'measurements') |
| 71 | 72 |
| 72 all_measurements = discover.DiscoverClasses( | 73 all_measurements = discover.DiscoverClasses( |
| 73 measurements_dir, top_level_dir, page_test.PageTest).values() | 74 measurements_dir, top_level_dir, page_test.PageTest).values() |
| 74 # Using the default of |index_by_class_name=False| means that if a module | 75 # Using the default of |index_by_class_name=False| means that if a module |
| 75 # has multiple benchmarks, only the last one is returned. | 76 # has multiple benchmarks, only the last one is returned. |
| 76 all_benchmarks = discover.DiscoverClasses( | 77 all_benchmarks = discover.DiscoverClasses( |
| 77 benchmarks_dir, top_level_dir, benchmark_module.Benchmark, | 78 benchmarks_dir, top_level_dir, benchmark_module.Benchmark, |
| 78 index_by_class_name=False).values() | 79 index_by_class_name=False).values() |
| 79 for benchmark in all_benchmarks: | 80 for benchmark in all_benchmarks: |
| 80 if benchmark.PageTestClass() not in all_measurements: | 81 if hasattr(benchmark, 'test') and benchmark.test not in all_measurements: |
| 81 # If the benchmark is not in measurements, then it is not composable. | 82 # If the benchmark is not in measurements, then it is not composable. |
| 82 # Ideally we'd like to test these as well, but the non-composable | 83 # Ideally we'd like to test these as well, but the non-composable |
| 83 # benchmarks are usually long-running benchmarks. | 84 # benchmarks are usually long-running benchmarks. |
| 84 continue | 85 continue |
| 85 | 86 |
| 86 # TODO(tonyg): Smoke doesn't work with session_restore yet. | 87 # TODO(tonyg): Smoke doesn't work with session_restore yet. |
| 87 if benchmark.Name().startswith('session_restore'): | 88 if benchmark.Name().startswith('session_restore'): |
| 88 continue | 89 continue |
| 89 | 90 |
| 90 if hasattr(benchmark, 'generated_profile_archive'): | 91 if hasattr(benchmark, 'generated_profile_archive'): |
| 91 # We'd like to test these, but don't know how yet. | 92 # We'd like to test these, but don't know how yet. |
| 92 continue | 93 continue |
| 93 | 94 |
| 94 class BenchmarkSmokeTest(unittest.TestCase): | 95 class BenchmarkSmokeTest(unittest.TestCase): |
| 95 pass | 96 pass |
| 96 setattr(BenchmarkSmokeTest, benchmark.Name(), SmokeTestGenerator(benchmark)) | 97 setattr(BenchmarkSmokeTest, benchmark.Name(), SmokeTestGenerator(benchmark)) |
| 97 suite.addTest(BenchmarkSmokeTest(benchmark.Name())) | 98 suite.addTest(BenchmarkSmokeTest(benchmark.Name())) |
| 98 | 99 |
| 99 return suite | 100 return suite |
| OLD | NEW |