Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(398)

Side by Side Diff: tools/perf/benchmarks/benchmark_smoke_unittest.py

Issue 1111543002: CL for perf tryjob on android (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | tools/perf/benchmarks/smoothness.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 # Copyright 2014 The Chromium Authors. All rights reserved. 1 # Copyright 2014 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 4
5 """Run the first page of one benchmark for every module. 5 """Run the first page of one benchmark for every module.
6 6
7 Only benchmarks that have a composable measurement are included. 7 Only benchmarks that have a composable measurement are included.
8 Ideally this test would be comprehensive, however, running one page 8 Ideally this test would be comprehensive, however, running one page
9 of every benchmark would run impractically long. 9 of every benchmark would run impractically long.
10 """ 10 """
11 11
12 import os 12 import os
13 import unittest 13 import unittest
14 14
15 from telemetry import benchmark as benchmark_module 15 from telemetry import benchmark as benchmark_module
16 from telemetry.core import discover 16 from telemetry.core import discover
17 from telemetry.page import page_test 17 from telemetry.page import page_test
18 from telemetry.unittest_util import options_for_unittests 18 from telemetry.unittest_util import options_for_unittests
19 from telemetry.unittest_util import progress_reporter 19 from telemetry.unittest_util import progress_reporter
20 20
21 from measurements import smoothness
22 from measurements import repaint
21 23
def SmokeTestGenerator(benchmark):
  """Returns a unittest method that smoke-runs |benchmark| once.

  The generated method trims the benchmark to a single page_repeat /
  pageset_repeat so the whole smoke suite cycles quickly, wires up the
  benchmark's command-line defaults, then asserts the run exits with 0.

  Args:
    benchmark: a telemetry Benchmark subclass (the class, not an instance).

  Returns:
    A bound-able test method suitable for setattr() onto a TestCase class.
  """
  # NOTE TO SHERIFFS: DO NOT DISABLE THIS TEST.
  #
  # This smoke test dynamically tests all benchmarks. So disabling it for one
  # failing or flaky benchmark would disable a much wider swath of coverage
  # than is usually intended. Instead, if a particular benchmark is failing,
  # disable it in tools/perf/benchmarks/*.
  @benchmark_module.Disabled('chromeos')  # crbug.com/351114
  def BenchmarkSmokeTest(self):
    # Only measure a single page so that this test cycles reasonably quickly.
    benchmark.options['pageset_repeat'] = 1
    benchmark.options['page_repeat'] = 1

    # Set the benchmark's default arguments.
    options = options_for_unittests.GetCopy()
    options.output_format = 'none'
    options.suppress_gtest_report = True
    parser = options.CreateParser()

    benchmark.AddCommandLineArgs(parser)
    benchmark_module.AddCommandLineArgs(parser)
    benchmark.SetArgumentDefaults(parser)
    options.MergeDefaultValues(parser.get_default_values())

    benchmark.ProcessCommandLineArgs(None, options)
    benchmark_module.ProcessCommandLineArgs(None, options)

    # Run() returns 0 on success; anything else is a benchmark failure.
    self.assertEqual(0, benchmark().Run(options),
                     msg='Failed: %s' % benchmark)

  return BenchmarkSmokeTest
63 55
64 56
def load_tests(_, _2, _3):
  """unittest load_tests protocol hook: builds the smoke-test suite.

  Discovers every benchmark class under tools/perf/benchmarks and, for each
  composable benchmark (one whose measurement appears in |all_measurements|),
  generates and registers a one-page smoke test via SmokeTestGenerator.

  Returns:
    A progress_reporter.TestSuite containing one test per smoked benchmark.
  """
  suite = progress_reporter.TestSuite()

  benchmarks_dir = os.path.dirname(__file__)
  top_level_dir = os.path.dirname(benchmarks_dir)

  # Only benchmarks whose |test| is one of these measurements are considered
  # composable and therefore smoke-testable; everything else is skipped below.
  all_measurements = [
      smoothness.Smoothness,
  ]
  # |index_by_class_name=True| keys the discovery result by class name, so
  # every benchmark class in a module is returned. (With the default of
  # False, only the last benchmark per module would be returned.)
  all_benchmarks = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
      index_by_class_name=True).values()
  for benchmark in all_benchmarks:
    # getattr guards against benchmark classes with no |test| attribute at
    # all: None is never in |all_measurements|, so they are skipped too.
    if getattr(benchmark, 'test', None) not in all_measurements:
      # If the benchmark does not have a measurement, then it is not
      # composable. Ideally we'd like to test these as well, but the
      # non-composable benchmarks are usually long-running benchmarks.
      continue

    # TODO(tonyg): Smoke doesn't work with session_restore yet.
    if (benchmark.Name().startswith('session_restore') or
        benchmark.Name().startswith('skpicture_printer')):
      continue

    if hasattr(benchmark, 'generated_profile_archive'):
      # We'd like to test these, but don't know how yet.
      continue

    class BenchmarkSmokeTest(unittest.TestCase):
      pass

    method = SmokeTestGenerator(benchmark)

    # Make sure any decorators are propagated from the original declaration.
    # (access to protected members) pylint: disable=W0212
    # TODO(dpranke): Since we only pick the first test from every class
    # (above), if that test is disabled, we'll end up not running *any*
    # test from the class. We should probably discover all of the tests
    # in a class, and then throw the ones we don't need away instead.
    if hasattr(benchmark, '_enabled_strings'):
      # NOTE(review): dropping 'android' here mutates the benchmark class so
      # Android-only benchmarks also run in this smoke suite — presumably a
      # tryjob-only hack; confirm before landing.
      if 'android' in benchmark._enabled_strings:
        benchmark._enabled_strings.remove('android')
      method._enabled_strings = benchmark._enabled_strings
    if hasattr(benchmark, '_disabled_strings'):
      method._disabled_strings = benchmark._disabled_strings
    setattr(BenchmarkSmokeTest, benchmark.Name(), method)

    suite.addTest(BenchmarkSmokeTest(benchmark.Name()))

  return suite
OLDNEW
« no previous file with comments | « no previous file | tools/perf/benchmarks/smoothness.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698