Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1100)

Unified Diff: content/test/gpu/generate_buildbot_json.py

Issue 2591813002: Run angle_perftests on the GPU FYI bots. (Closed)
Patch Set: rebase Created 3 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « chrome/test/BUILD.gn ('k') | testing/buildbot/chromium.gpu.fyi.json » ('j') | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: content/test/gpu/generate_buildbot_json.py
diff --git a/content/test/gpu/generate_buildbot_json.py b/content/test/gpu/generate_buildbot_json.py
index 65240c5f553376142c5c45aa65de4871ee049cb0..7ac81681011ff2ec8548511cf0d3a8ad7139bd6e 100755
--- a/content/test/gpu/generate_buildbot_json.py
+++ b/content/test/gpu/generate_buildbot_json.py
@@ -1363,6 +1363,32 @@ TELEMETRY_GPU_INTEGRATION_TESTS = {
},
}
# Isolated tests that don't run through Telemetry. The generator must emit
# them into the "isolated_scripts" section of the output JSON.
NON_TELEMETRY_ISOLATED_SCRIPT_TESTS = {
  # We run angle_perftests on the ANGLE CQ to ensure the tests don't crash.
  'angle_perftests': {
    'tester_configs': [
      {
        'fyi_only': True,
        'run_on_optional': True,
        # Restrict to the Release NVIDIA bots on Windows and Linux.
        'build_configs': ['Release'],
        'swarming_dimension_sets': [
          {'gpu': '10de:104a', 'os': 'Windows-2008ServerR2-SP1'},
          {'gpu': '10de:104a', 'os': 'Linux'},
        ],
      },
    ],
  },
}
+
def substitute_args(tester_config, args):
"""Substitutes the ${os_type} variable in |args| from the
tester_config's "os_type" property.
@@ -1596,6 +1622,57 @@ def generate_telemetry_tests(tester_name, tester_config,
isolated_scripts.append(test)
return isolated_scripts
# TODO: Refactor this with generate_telemetry_test to reduce the
# duplicated argument/step-name/swarming handling.
def generate_non_telemetry_isolated_test(tester_name, tester_config,
                                         test, test_config, is_fyi):
  """Generates one isolated_scripts entry for a non-Telemetry test.

  Args:
    tester_name: name of the bot this entry is being generated for.
    tester_config: the bot's configuration dict (must contain
        'swarming_dimensions').
    test: the isolate/test name, e.g. 'angle_perftests'.
    test_config: the per-test configuration dict from
        NON_TELEMETRY_ISOLATED_SCRIPT_TESTS.
    is_fyi: whether this is the FYI waterfall.

  Returns:
    A JSON-serializable dict describing the test, or None if this
    tester shouldn't run it.
  """
  if not should_run_on_tester(tester_name, tester_config, test_config, is_fyi):
    return None
  # Prepend GPU-specific flags.
  test_args = ['-v']
  if 'args' in test_config:
    test_args.extend(substitute_args(tester_config, test_config['args']))
  if 'desktop_args' in test_config and not is_android(tester_config):
    test_args.extend(substitute_args(tester_config,
                                     test_config['desktop_args']))
  if 'android_args' in test_config and is_android(tester_config):
    test_args.extend(substitute_args(tester_config,
                                     test_config['android_args']))
  # The step name must end in 'test' or 'tests' in order for the
  # results to automatically show up on the flakiness dashboard.
  # (At least, this was true some time ago.) Continue to use this
  # naming convention for the time being to minimize changes.
  step_name = test
  if not (step_name.endswith('test') or step_name.endswith('tests')):
    step_name = '%s_tests' % step_name
  swarming = {
    # Always say this is true regardless of whether the tester
    # supports swarming. It doesn't hurt.
    'can_use_on_swarming_builders': True,
    'dimension_sets': tester_config['swarming_dimensions']
  }
  if 'swarming' in test_config:
    swarming.update(test_config['swarming'])
  result = {
    'args': test_args,
    'isolate_name': test,
    'name': step_name,
    'swarming': swarming,
  }
  if 'non_precommit_args' in test_config:
    result['non_precommit_args'] = test_config['non_precommit_args']
  if 'precommit_args' in test_config:
    result['precommit_args'] = test_config['precommit_args']
  return result
+
def generate_non_telemetry_isolated_tests(tester_name, tester_config,
                                          test_dictionary, is_fyi):
  """Builds the isolated_scripts entries for all non-Telemetry tests.

  Walks |test_dictionary| in sorted name order and collects the entry
  generated for each test, skipping tests that don't apply to this bot.
  """
  results = []
  for name, config in sorted(test_dictionary.iteritems()):
    entry = generate_non_telemetry_isolated_test(
        tester_name, tester_config, name, config, is_fyi)
    if entry is not None:
      results.append(entry)
  return results
+
def generate_all_tests(waterfall, is_fyi):
tests = {}
for builder, config in waterfall['builders'].iteritems():
@@ -1604,7 +1681,9 @@ def generate_all_tests(waterfall, is_fyi):
gtests = generate_gtests(name, config, COMMON_GTESTS, is_fyi)
isolated_scripts = \
generate_telemetry_tests(
- name, config, TELEMETRY_GPU_INTEGRATION_TESTS, is_fyi)
+ name, config, TELEMETRY_GPU_INTEGRATION_TESTS, is_fyi) + \
+ generate_non_telemetry_isolated_tests(name, config,
+ NON_TELEMETRY_ISOLATED_SCRIPT_TESTS, is_fyi)
tests[name] = {
'gtest_tests': sorted(gtests, key=lambda x: x['test']),
'isolated_scripts': sorted(isolated_scripts, key=lambda x: x['name'])
« no previous file with comments | « chrome/test/BUILD.gn ('k') | testing/buildbot/chromium.gpu.fyi.json » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698