Chromium Code Reviews

Unified Diff: tools/perf/benchmarks/benchmark_unittest.py

Issue 818053003: [Telemetry] Add test that makes sure no two benchmarks have the same name (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 6 years ago
Index: tools/perf/benchmarks/benchmark_unittest.py
diff --git a/tools/perf/benchmarks/benchmark_unittest.py b/tools/perf/benchmarks/benchmark_unittest.py
index 22a4db76cd6a8c348ea1a33d7d9620ca85e52d1c..f5ffa5a88a6a0d1712b6a214ffd5b4423a92e260 100644
--- a/tools/perf/benchmarks/benchmark_unittest.py
+++ b/tools/perf/benchmarks/benchmark_unittest.py
@@ -4,8 +4,10 @@
"""For all the benchmarks that set options, test that the options are valid."""
+import logging
import os
import unittest
+from collections import defaultdict
from telemetry import benchmark as benchmark_module
from telemetry.core import browser_options
@@ -18,6 +20,11 @@ def _GetPerfDir(*subdirs):
return os.path.join(perf_dir, *subdirs)
+def _GetAllPerfBenchmarks():
+ return discover.DiscoverClasses(
+ _GetPerfDir('benchmarks'), _GetPerfDir(), benchmark_module.Benchmark,
+ index_by_class_name=True).values()
+
def _BenchmarkOptionsTestGenerator(benchmark):
def testBenchmarkOptions(self): # pylint: disable=W0613
"""Invalid options will raise benchmark.InvalidOptionsError."""
@@ -29,12 +36,22 @@ def _BenchmarkOptionsTestGenerator(benchmark):
return testBenchmarkOptions
+class TestNoBenchmarkNamesDuplication(unittest.TestCase):
+ def runTest(self):
+ all_benchmarks = _GetAllPerfBenchmarks()
+ names_to_benchmarks = defaultdict(list)
+ for b in all_benchmarks:
+ names_to_benchmarks[b.Name()].append(b)
+ for n in names_to_benchmarks:
+ self.assertEquals(1, len(names_to_benchmarks[n]),
+ 'Multiple benchmarks with the same name %s are '
+ 'found: %s' % (n, str(names_to_benchmarks[n])))
+
+
def _AddBenchmarkOptionsTests(suite):
# Using |index_by_class_name=True| allows returning multiple benchmarks
# from a module.
- all_benchmarks = discover.DiscoverClasses(
- _GetPerfDir('benchmarks'), _GetPerfDir(), benchmark_module.Benchmark,
- index_by_class_name=True).values()
+ all_benchmarks = _GetAllPerfBenchmarks()
for benchmark in all_benchmarks:
if not benchmark.options:
# No need to test benchmarks that have not defined options.
@@ -44,6 +61,7 @@ def _AddBenchmarkOptionsTests(suite):
setattr(BenchmarkOptionsTest, benchmark.Name(),
_BenchmarkOptionsTestGenerator(benchmark))
suite.addTest(BenchmarkOptionsTest(benchmark.Name()))
+ suite.addTest(TestNoBenchmarkNamesDuplication())
def load_tests(_, _2, _3):
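
For reference, here is a minimal, self-contained sketch of the pattern this patch adds: group discovered benchmark classes by Name() in a defaultdict, assert that each group has exactly one member, and register the check through unittest's load_tests hook. The FakeBenchmark classes and the _AllBenchmarks helper below are hypothetical stand-ins for the classes that _GetAllPerfBenchmarks() discovers via discover.DiscoverClasses; only the grouping and suite-assembly logic mirrors the patch.

# Sketch of the duplicate-name check, independent of Telemetry.
# FakeBenchmarkA/B and _AllBenchmarks are hypothetical stand-ins for the
# benchmark classes that _GetAllPerfBenchmarks() would return.
import unittest
from collections import defaultdict


class FakeBenchmarkA(object):
  @classmethod
  def Name(cls):
    return 'sunspider'


class FakeBenchmarkB(object):
  @classmethod
  def Name(cls):
    return 'octane'


def _AllBenchmarks():
  # Stand-in for _GetAllPerfBenchmarks(): the classes to check.
  return [FakeBenchmarkA, FakeBenchmarkB]


class TestNoBenchmarkNamesDuplication(unittest.TestCase):
  def runTest(self):
    # Group every benchmark class under its Name(); any bucket with more
    # than one entry means two benchmarks collide on the same name.
    names_to_benchmarks = defaultdict(list)
    for b in _AllBenchmarks():
      names_to_benchmarks[b.Name()].append(b)
    for n, benchmarks in names_to_benchmarks.items():
      self.assertEqual(
          1, len(benchmarks),
          'Multiple benchmarks with the same name %s are found: %s' %
          (n, str(benchmarks)))


def load_tests(loader, standard_tests, pattern):
  # unittest's load_tests protocol: return the suite to run for this module.
  suite = unittest.TestSuite()
  suite.addTest(TestNoBenchmarkNamesDuplication())
  return suite


if __name__ == '__main__':
  unittest.main()

Run directly with Python, the load_tests hook hands unittest an explicitly assembled suite; this is the same mechanism the patched benchmark_unittest.py uses to register both the per-benchmark options tests and the new duplication test.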
