Index: tools/perf/benchmarks/benchmark_smoke_unittest.py
diff --git a/tools/perf/benchmarks/benchmark_smoke_unittest.py b/tools/perf/benchmarks/benchmark_smoke_unittest.py
index bed257ebb9736c0a777a899a4fc23aed7018d43f..a60e6a2cc24398cfdbb408fd94644db4fe2c19fe 100644
--- a/tools/perf/benchmarks/benchmark_smoke_unittest.py
+++ b/tools/perf/benchmarks/benchmark_smoke_unittest.py
@@ -18,6 +18,8 @@ from telemetry.page import page_test
 from telemetry.unittest_util import options_for_unittests
 from telemetry.unittest_util import progress_reporter
+from measurements import smoothness
+from measurements import repaint
 def SmokeTestGenerator(benchmark):
   # NOTE TO SHERIFFS: DO NOT DISABLE THIS TEST.
@@ -32,16 +34,6 @@ def SmokeTestGenerator(benchmark):
     benchmark.options['pageset_repeat'] = 1
     benchmark.options['page_repeat'] = 1
-    class SinglePageBenchmark(benchmark):  # pylint: disable=W0232
-      def CreatePageSet(self, options):
-        # pylint: disable=E1002
-        ps = super(SinglePageBenchmark, self).CreatePageSet(options)
-        for p in ps.pages:
-          p.skip_waits = True
-          ps.user_stories = [p]
-          break
-        return ps
-
     # Set the benchmark's default arguments.
     options = options_for_unittests.GetCopy()
     options.output_format = 'none'
@@ -56,7 +48,7 @@ def SmokeTestGenerator(benchmark):
     benchmark.ProcessCommandLineArgs(None, options)
     benchmark_module.ProcessCommandLineArgs(None, options)
-    self.assertEqual(0, SinglePageBenchmark().Run(options),
+    self.assertEqual(0, benchmark().Run(options),
                      msg='Failed: %s' % benchmark)
   return BenchmarkSmokeTest
@@ -67,17 +59,17 @@ def load_tests(_, _2, _3):
   benchmarks_dir = os.path.dirname(__file__)
   top_level_dir = os.path.dirname(benchmarks_dir)
-  measurements_dir = os.path.join(top_level_dir, 'measurements')
-  all_measurements = discover.DiscoverClasses(
-      measurements_dir, top_level_dir, page_test.PageTest).values()
+  all_measurements = [
+      smoothness.Smoothness
+  ]
   # Using the default of |index_by_class_name=False| means that if a module
   # has multiple benchmarks, only the last one is returned.
   all_benchmarks = discover.DiscoverClasses(
       benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
-      index_by_class_name=False).values()
+      index_by_class_name=True).values()
   for benchmark in all_benchmarks:
-    if hasattr(benchmark, 'test') and benchmark.test not in all_measurements:
+    if benchmark.test not in all_measurements:
       # If the benchmark does not have a measurement, then it is not composable.
       # Ideally we'd like to test these as well, but the non-composable
       # benchmarks are usually long-running benchmarks.
@@ -103,7 +95,9 @@ def load_tests(_, _2, _3):
     # (above), if that test is disabled, we'll end up not running *any*
     # test from the class. We should probably discover all of the tests
     # in a class, and then throw the ones we don't need away instead.
-    if hasattr(benchmark, '_enabled_strings'):
+    if (hasattr(benchmark, '_enabled_strings')):
+      if 'android' in benchmark._enabled_strings:
+        benchmark._enabled_strings.remove('android')
       method._enabled_strings = benchmark._enabled_strings
     if hasattr(benchmark, '_disabled_strings'):
       method._disabled_strings = benchmark._disabled_strings