Chromium Code Reviews

Diff: content/test/gpu/generate_buildbot_json.py

Issue 2591813002: Run angle_perftests on the GPU FYI bots. (Closed)
Patch Set: rebase Created 3 years, 11 months ago
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Script to generate chromium.gpu.json and chromium.gpu.fyi.json in
the src/testing/buildbot directory. Maintaining these files by hand is
too unwieldy.
"""

(...skipping 1345 matching lines...)
      '../../content/test/data/gpu/webgl2_conformance_tests_output.json',
    ],
    'swarming': {
      # These tests currently take about an hour and fifteen minutes
      # to run. Split them into roughly 5-minute shards.
      'shards': 15,
    },
  },
}

# These isolated tests don't use telemetry. They need to be placed in the
# isolated_scripts section of the generated json.
NON_TELEMETRY_ISOLATED_SCRIPT_TESTS = {
  # We run angle_perftests on the ANGLE CQ to ensure the tests don't crash.
  'angle_perftests': {
    'tester_configs': [
      {
        'fyi_only': True,
        'run_on_optional': True,
        # Run on the Win/Linux Release NVIDIA bots.
        'build_configs': ['Release'],
        'swarming_dimension_sets': [
          {
            'gpu': '10de:104a',
            'os': 'Windows-2008ServerR2-SP1'
          },
          {
            'gpu': '10de:104a',
            'os': 'Linux'
          }
        ],
      },
    ],
  },
}
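For illustration only: given the entry above, the generation code added later in this file (generate_non_telemetry_isolated_test) would emit roughly the following isolated_scripts stanza into chromium.gpu.fyi.json for a matching Windows Release tester. This is a sketch, not copied from the generated file; the dimension values are assumed to come from that tester's swarming_dimensions.

  # Illustrative only -- shape follows generate_non_telemetry_isolated_test();
  # the dimension_sets shown are assumed from the tester's configuration.
  {
    'args': ['-v'],
    'isolate_name': 'angle_perftests',
    'name': 'angle_perftests',  # already ends in 'tests', so no suffix is added
    'swarming': {
      'can_use_on_swarming_builders': True,
      'dimension_sets': [
        {
          'gpu': '10de:104a',
          'os': 'Windows-2008ServerR2-SP1'
        }
      ]
    }
  }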
def substitute_args(tester_config, args):
  """Substitutes the ${os_type} variable in |args| from the
  tester_config's "os_type" property.
  """
  substitutions = {
    'os_type': tester_config['os_type']
  }
  return [string.Template(arg).safe_substitute(substitutions) for arg in args]
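A quick, hypothetical usage example (the tester_config and args literals are invented for illustration; only the 'os_type' key is consulted):

  tester_config = {'os_type': 'win'}
  args = ['--os-type=${os_type}', '--some-flag']
  substitute_args(tester_config, args)
  # -> ['--os-type=win', '--some-flag']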
def matches_swarming_dimensions(tester_config, dimension_sets):
(...skipping 213 matching lines...)
def generate_telemetry_tests(tester_name, tester_config,
                             test_dictionary, is_fyi):
  isolated_scripts = []
  for test_name, test_config in sorted(test_dictionary.iteritems()):
    test = generate_telemetry_test(
        tester_name, tester_config, test_name, test_config, is_fyi)
    if test:
      isolated_scripts.append(test)
  return isolated_scripts
def generate_non_telemetry_isolated_test(tester_name, tester_config,
                                         test, test_config, is_fyi):

Ken Russell 2017/01/10 00:22:34: Could you add a TODO about refactoring this and ge…

  if not should_run_on_tester(tester_name, tester_config, test_config, is_fyi):
    return None
  test_args = ['-v']
  if 'args' in test_config:
    test_args.extend(substitute_args(tester_config, test_config['args']))
  if 'desktop_args' in test_config and not is_android(tester_config):
    test_args.extend(substitute_args(tester_config,
                                     test_config['desktop_args']))
  if 'android_args' in test_config and is_android(tester_config):
    test_args.extend(substitute_args(tester_config,
                                     test_config['android_args']))
  # The step name must end in 'test' or 'tests' in order for the
  # results to automatically show up on the flakiness dashboard.
  # (At least, this was true some time ago.) Continue to use this
  # naming convention for the time being to minimize changes.
  step_name = test
  if not (step_name.endswith('test') or step_name.endswith('tests')):
    step_name = '%s_tests' % step_name
  # Prepend Telemetry GPU-specific flags.

Ken Russell 2017/01/10 00:22:33: Update: "Prepend GPU-specific flags."

  swarming = {
    # Always say this is true regardless of whether the tester
    # supports swarming. It doesn't hurt.
    'can_use_on_swarming_builders': True,
    'dimension_sets': tester_config['swarming_dimensions']
  }
  if 'swarming' in test_config:
    swarming.update(test_config['swarming'])
  result = {
    'args': test_args,
    'isolate_name': test,
    'name': step_name,
    'swarming': swarming,
  }
  if 'non_precommit_args' in test_config:
    result['non_precommit_args'] = test_config['non_precommit_args']
  if 'precommit_args' in test_config:
    result['precommit_args'] = test_config['precommit_args']
  return result

def generate_non_telemetry_isolated_tests(tester_name, tester_config,
                                          test_dictionary, is_fyi):
  isolated_scripts = []
  for test_name, test_config in sorted(test_dictionary.iteritems()):
    test = generate_non_telemetry_isolated_test(
        tester_name, tester_config, test_name, test_config, is_fyi)
    if test:
      isolated_scripts.append(test)
  return isolated_scripts
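Regarding the review comment above asking for a refactoring TODO (the truncated comment presumably refers to generate_telemetry_test): purely as a hypothetical sketch, not part of this change, the argument handling duplicated between the two generators could be pulled into a shared helper along these lines. The helper name is invented.

  # Hypothetical shared helper (not in this CL): builds the arg list for a
  # test from its config, applying ${os_type} substitution plus the
  # desktop_args/android_args handling, so both generators can reuse it.
  def build_test_args(tester_config, test_config, base_args):
    test_args = list(base_args)
    if 'args' in test_config:
      test_args.extend(substitute_args(tester_config, test_config['args']))
    if 'desktop_args' in test_config and not is_android(tester_config):
      test_args.extend(substitute_args(tester_config,
                                       test_config['desktop_args']))
    if 'android_args' in test_config and is_android(tester_config):
      test_args.extend(substitute_args(tester_config,
                                       test_config['android_args']))
    return test_args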
def generate_all_tests(waterfall, is_fyi):
  tests = {}
  for builder, config in waterfall['builders'].iteritems():
    tests[builder] = config
  for name, config in waterfall['testers'].iteritems():
    gtests = generate_gtests(name, config, COMMON_GTESTS, is_fyi)
    isolated_scripts = \
      generate_telemetry_tests(
        name, config, TELEMETRY_GPU_INTEGRATION_TESTS, is_fyi) + \
      generate_non_telemetry_isolated_tests(
        name, config, NON_TELEMETRY_ISOLATED_SCRIPT_TESTS, is_fyi)
    tests[name] = {
      'gtest_tests': sorted(gtests, key=lambda x: x['test']),
      'isolated_scripts': sorted(isolated_scripts, key=lambda x: x['name'])
    }
  tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {}
  tests['AAAAA2 See generate_buildbot_json.py to make changes'] = {}
  filename = 'chromium.gpu.fyi.json' if is_fyi else 'chromium.gpu.json'
  with open(os.path.join(SRC_DIR, 'testing', 'buildbot', filename), 'wb') as fp:
    json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)
    fp.write('\n')
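For orientation, a rough sketch of the top-level shape of the generated testing/buildbot JSON, written as a Python literal (the tester name and its contents are illustrative; the two sentinel keys are the literals written by generate_all_tests above). Because json.dump is called with sort_keys=True, the 'AAAAA' prefixes make the do-not-edit notice sort to the top of the file.

  {
    'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
    'AAAAA2 See generate_buildbot_json.py to make changes': {},
    'Some GPU FYI tester (hypothetical name)': {
      'gtest_tests': [],
      'isolated_scripts': []
    }
  }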
def main():
  generate_all_tests(FYI_WATERFALL, True)
  generate_all_tests(WATERFALL, False)
  return 0

if __name__ == "__main__":
  sys.exit(main())