Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(118)

Side by Side Diff: content/test/gpu/generate_buildbot_json.py

Issue 2591813002: Run angle_perftests on the GPU FYI bots. (Closed)
Patch Set: Use shared base generator func. Created 3 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « chrome/test/BUILD.gn ('k') | testing/buildbot/chromium.gpu.fyi.json » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright 2016 The Chromium Authors. All rights reserved. 2 # Copyright 2016 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """Script to generate chromium.gpu.json and chromium.gpu.fyi.json in 6 """Script to generate chromium.gpu.json and chromium.gpu.fyi.json in
7 the src/testing/buildbot directory. Maintaining these files by hand is 7 the src/testing/buildbot directory. Maintaining these files by hand is
8 too unwieldy. 8 too unwieldy.
9 """ 9 """
10 10
(...skipping 1381 matching lines...) Expand 10 before | Expand all | Expand 10 after
1392 '../../content/test/data/gpu/webgl2_conformance_tests_output.json', 1392 '../../content/test/data/gpu/webgl2_conformance_tests_output.json',
1393 ], 1393 ],
1394 'swarming': { 1394 'swarming': {
1395 # These tests currently take about an hour and fifteen minutes 1395 # These tests currently take about an hour and fifteen minutes
1396 # to run. Split them into roughly 5-minute shards. 1396 # to run. Split them into roughly 5-minute shards.
1397 'shards': 15, 1397 'shards': 15,
1398 }, 1398 },
1399 }, 1399 },
1400 } 1400 }
1401 1401
# These isolated tests don't use telemetry. They need to be placed in the
# isolated_scripts section of the generated json.
NON_TELEMETRY_ISOLATED_SCRIPT_TESTS = {
  # We run angle_perftests on the ANGLE CQ to ensure the tests don't crash.
  'angle_perftests': {
    'tester_configs': [
      {
        # Restrict generation to the chromium.gpu.fyi waterfall.
        'fyi_only': True,
        'run_on_optional': True,
        # Run on the Win/Linux Release NVIDIA bots.
        'build_configs': ['Release'],
        # Only testers whose swarming dimensions match one of these sets
        # pick up the test. '10de' is NVIDIA's PCI vendor id; '104a' is the
        # device id of the GPU on these bots.
        'swarming_dimension_sets': [
          {
            'gpu': '10de:104a',
            'os': 'Windows-2008ServerR2-SP1'
          },
          {
            'gpu': '10de:104a',
            'os': 'Linux'
          }
        ],
      },
    ],
  },
}
1427
def substitute_args(tester_config, args):
  """Expands the ${os_type} template variable in each element of |args|.

  The substitution value comes from the tester_config's "os_type" property;
  arguments without the variable pass through unchanged.
  """
  mapping = {
    'os_type': tester_config['os_type']
  }
  expanded = []
  for arg in args:
    expanded.append(string.Template(arg).safe_substitute(mapping))
  return expanded
1410 1436
1411 def matches_swarming_dimensions(tester_config, dimension_sets): 1437 def matches_swarming_dimensions(tester_config, dimension_sets):
(...skipping 132 matching lines...) Expand 10 before | Expand all | Expand 10 after
1544 # Don't put the desktop_swarming in the JSON. 1570 # Don't put the desktop_swarming in the JSON.
1545 result.pop('desktop_swarming') 1571 result.pop('desktop_swarming')
1546 1572
1547 # This flag only has an effect on the Linux bots that run tests 1573 # This flag only has an effect on the Linux bots that run tests
1548 # locally (as opposed to via Swarming), which are only those couple 1574 # locally (as opposed to via Swarming), which are only those couple
1549 # on the chromium.gpu.fyi waterfall. Still, there is no harm in 1575 # on the chromium.gpu.fyi waterfall. Still, there is no harm in
1550 # specifying it everywhere. 1576 # specifying it everywhere.
1551 result['use_xvfb'] = False 1577 result['use_xvfb'] = False
1552 return result 1578 return result
1553 1579
def generate_gtests(tester_name, tester_config, test_dictionary, is_fyi):
  """Builds the list of gtest entries for one tester.

  The dictionary is walked in sorted key order. Keeping per-test entries in
  a list (rather than merging dictionaries) preserves the relative order of
  tests that share a sort key (e.g. gles2_conform_d3d9_test and variants),
  which keeps diffs against the handwritten JSON minimal since Python's
  sort is stable.
  """
  results = []
  for name, cfg in sorted(test_dictionary.iteritems()):
    entry = generate_gtest(tester_name, tester_config, name, cfg, is_fyi)
    # generate_gtest may veto the test generation on this platform.
    if entry:
      results.append(entry)
  return results
1594
def generate_isolated_test(tester_name, tester_config, test, test_config,
                           is_fyi, extra_browser_args, isolate_name,
                           override_compile_targets, prefix_args):
  """Generates one isolated-script test entry for the buildbot JSON.

  Shared base generator used by both the telemetry and the non-telemetry
  paths. Returns None when should_run_on_tester vetoes this test for the
  tester; otherwise returns a dict in the 'isolated_scripts' format.

  Args:
    extra_browser_args: optional list of browser flags from the caller, or
        None; merged with the test config's own 'extra_browser_args'.
    isolate_name: name of the isolate target to run.
    override_compile_targets: optional list for the result's
        'override_compile_targets' key, or None to omit it.
    prefix_args: args placed before the generated test args.
  """
  if not should_run_on_tester(tester_name, tester_config, test_config, is_fyi):
    return None
  test_args = ['-v']
  # Collect browser args from both the caller and the test's own config and
  # join them once. (Building the string piecewise produced a stray leading
  # space in '--extra-browser-args' when the caller passed None but the test
  # config supplied 'extra_browser_args'.)
  browser_args = []
  if extra_browser_args is not None:
    browser_args.extend(extra_browser_args)
  if 'extra_browser_args' in test_config:
    browser_args.extend(test_config['extra_browser_args'])
  if browser_args:
    test_args.append('--extra-browser-args=' + ' '.join(browser_args))
  if 'args' in test_config:
    test_args.extend(substitute_args(tester_config, test_config['args']))
  # desktop_args and android_args are mutually exclusive per platform.
  if 'desktop_args' in test_config and not is_android(tester_config):
    test_args.extend(substitute_args(tester_config,
                                     test_config['desktop_args']))
  if 'android_args' in test_config and is_android(tester_config):
    test_args.extend(substitute_args(tester_config,
                                     test_config['android_args']))
  # The step name must end in 'test' or 'tests' in order for the
  # results to automatically show up on the flakiness dashboard.
  # (At least, this was true some time ago.) Continue to use this
  # naming convention for the time being to minimize changes.
  step_name = test
  if not (step_name.endswith('test') or step_name.endswith('tests')):
    step_name = '%s_tests' % step_name
  swarming = {
    # Always say this is true regardless of whether the tester
    # supports swarming. It doesn't hurt.
    'can_use_on_swarming_builders': True,
    'dimension_sets': tester_config['swarming_dimensions']
  }
  # Per-test swarming settings (e.g. shards) override the tester defaults.
  if 'swarming' in test_config:
    swarming.update(test_config['swarming'])
  result = {
    'args': prefix_args + test_args,
    'isolate_name': isolate_name,
    'name': step_name,
    'swarming': swarming,
  }
  if override_compile_targets is not None:
    result['override_compile_targets'] = override_compile_targets
  if 'non_precommit_args' in test_config:
    result['non_precommit_args'] = test_config['non_precommit_args']
  if 'precommit_args' in test_config:
    result['precommit_args'] = test_config['precommit_args']
  return result
1609 1646
def generate_telemetry_test(tester_name, tester_config,
                            test, test_config, is_fyi):
  """Generates one telemetry-based isolated test entry (or None if vetoed).

  Wraps generate_isolated_test with the telemetry-specific isolate name,
  compile target, browser flags and benchmark prefix args.
  """
  # --expose-gc helps the WebGL conformance tests reproduce GC-related
  # bugs in the V8 bindings more reliably.
  browser_flags = ['--enable-logging=stderr', '--js-flags=--expose-gc']
  benchmark = test_config.get('target_name') or test
  browser_flavor = tester_config['build_config'].lower()
  telemetry_prefix = [
    benchmark,
    '--show-stdout',
    '--browser=%s' % browser_flavor
  ]
  return generate_isolated_test(tester_name, tester_config, test,
                                test_config, is_fyi, browser_flags,
                                'telemetry_gpu_integration_test',
                                ['telemetry_gpu_integration_test_run'],
                                telemetry_prefix)
1624 1661
def generate_telemetry_tests(tester_name, tester_config,
                             test_dictionary, is_fyi):
  """Builds the telemetry isolated_scripts list for one tester.

  Iterates the test dictionary in sorted key order, dropping entries that
  generate_telemetry_test vetoes for this tester.
  """
  scripts = []
  for name, cfg in sorted(test_dictionary.iteritems()):
    entry = generate_telemetry_test(tester_name, tester_config, name, cfg,
                                    is_fyi)
    if entry:
      scripts.append(entry)
  return scripts
1634 1671
def generate_non_telemetry_isolated_test(tester_name, tester_config,
                                         test, test_config, is_fyi):
  """Generates one non-telemetry isolated test entry (or None if vetoed).

  The isolate shares the test's own name; there are no caller-supplied
  browser args, no compile-target override, and no prefix args.
  """
  return generate_isolated_test(tester_name, tester_config, test,
                                test_config, is_fyi,
                                None,   # extra_browser_args
                                test,   # isolate_name
                                None,   # override_compile_targets
                                [])     # prefix_args
1676
def generate_non_telemetry_isolated_tests(tester_name, tester_config,
                                          test_dictionary, is_fyi):
  """Builds the non-telemetry isolated_scripts list for one tester.

  Iterates the test dictionary in sorted key order, dropping entries that
  generate_non_telemetry_isolated_test vetoes for this tester.
  """
  scripts = []
  for name, cfg in sorted(test_dictionary.iteritems()):
    entry = generate_non_telemetry_isolated_test(tester_name, tester_config,
                                                 name, cfg, is_fyi)
    if entry:
      scripts.append(entry)
  return scripts
1686
def generate_all_tests(waterfall, is_fyi):
  """Generates and writes the buildbot JSON file for one waterfall.

  Builder entries are copied through verbatim; each tester gets its gtest
  and isolated-script lists generated from the module-level test tables.
  Writes chromium.gpu.fyi.json or chromium.gpu.json under
  src/testing/buildbot depending on |is_fyi|.
  """
  tests = {}
  for builder, config in waterfall['builders'].iteritems():
    tests[builder] = config
  for name, config in waterfall['testers'].iteritems():
    gtests = generate_gtests(name, config, COMMON_GTESTS, is_fyi)
    # Telemetry and non-telemetry tests both land in 'isolated_scripts'.
    isolated_scripts = \
        generate_telemetry_tests(
            name, config, TELEMETRY_GPU_INTEGRATION_TESTS, is_fyi) + \
        generate_non_telemetry_isolated_tests(name, config,
            NON_TELEMETRY_ISOLATED_SCRIPT_TESTS, is_fyi)
    tests[name] = {
      'gtest_tests': sorted(gtests, key=lambda x: x['test']),
      'isolated_scripts': sorted(isolated_scripts, key=lambda x: x['name'])
    }
  # 'AAAAA' keys sort first in the emitted JSON, so these warnings appear
  # at the top of the generated file.
  tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {}
  tests['AAAAA2 See generate_buildbot_json.py to make changes'] = {}
  filename = 'chromium.gpu.fyi.json' if is_fyi else 'chromium.gpu.json'
  with open(os.path.join(SRC_DIR, 'testing', 'buildbot', filename), 'wb') as fp:
    json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)
    fp.write('\n')
1654 1708
def main():
  """Regenerates both waterfall JSON files. Returns the process exit code."""
  # FYI waterfall first, then the main waterfall, matching the historical
  # generation order.
  for waterfall, fyi in ((FYI_WATERFALL, True), (WATERFALL, False)):
    generate_all_tests(waterfall, fyi)
  return 0

if __name__ == "__main__":
  sys.exit(main())
OLDNEW
« no previous file with comments | « chrome/test/BUILD.gn ('k') | testing/buildbot/chromium.gpu.fyi.json » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698