| OLD | NEW |
| --- | --- |
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # | 2 # |
| 3 # Copyright 2013 The Chromium Authors. All rights reserved. | 3 # Copyright 2013 The Chromium Authors. All rights reserved. |
| 4 # Use of this source code is governed by a BSD-style license that can be | 4 # Use of this source code is governed by a BSD-style license that can be |
| 5 # found in the LICENSE file. | 5 # found in the LICENSE file. |
| 6 | 6 |
| 7 """Runs all types of tests from one unified interface.""" | 7 """Runs all types of tests from one unified interface.""" |
| 8 | 8 |
| 9 import collections | 9 import collections |
| 10 import logging | 10 import logging |
| (...skipping 21 matching lines...) | |
| 32 from pylib.host_driven import setup as host_driven_setup | 32 from pylib.host_driven import setup as host_driven_setup |
| 33 from pylib.instrumentation import setup as instrumentation_setup | 33 from pylib.instrumentation import setup as instrumentation_setup |
| 34 from pylib.instrumentation import test_options as instrumentation_test_options | 34 from pylib.instrumentation import test_options as instrumentation_test_options |
| 35 from pylib.junit import setup as junit_setup | 35 from pylib.junit import setup as junit_setup |
| 36 from pylib.junit import test_dispatcher as junit_dispatcher | 36 from pylib.junit import test_dispatcher as junit_dispatcher |
| 37 from pylib.monkey import setup as monkey_setup | 37 from pylib.monkey import setup as monkey_setup |
| 38 from pylib.monkey import test_options as monkey_test_options | 38 from pylib.monkey import test_options as monkey_test_options |
| 39 from pylib.perf import setup as perf_setup | 39 from pylib.perf import setup as perf_setup |
| 40 from pylib.perf import test_options as perf_test_options | 40 from pylib.perf import test_options as perf_test_options |
| 41 from pylib.perf import test_runner as perf_test_runner | 41 from pylib.perf import test_runner as perf_test_runner |
| | 42 from pylib.results import json_results |
| | 43 from pylib.results import report_results |
| 42 from pylib.uiautomator import setup as uiautomator_setup | 44 from pylib.uiautomator import setup as uiautomator_setup |
| 43 from pylib.uiautomator import test_options as uiautomator_test_options | 45 from pylib.uiautomator import test_options as uiautomator_test_options |
| 44 from pylib.utils import apk_helper | 46 from pylib.utils import apk_helper |
| 45 from pylib.utils import command_option_parser | 47 from pylib.utils import command_option_parser |
| 46 from pylib.utils import report_results | |
| 47 from pylib.utils import reraiser_thread | 48 from pylib.utils import reraiser_thread |
| 48 from pylib.utils import run_tests_helper | 49 from pylib.utils import run_tests_helper |
| 49 | 50 |
| 50 | 51 |
| 51 def AddCommonOptions(option_parser): | 52 def AddCommonOptions(option_parser): |
| 52 """Adds all common options to |option_parser|.""" | 53 """Adds all common options to |option_parser|.""" |
| 53 | 54 |
| 54 group = optparse.OptionGroup(option_parser, 'Common Options') | 55 group = optparse.OptionGroup(option_parser, 'Common Options') |
| 55 default_build_type = os.environ.get('BUILDTYPE', 'Debug') | 56 default_build_type = os.environ.get('BUILDTYPE', 'Debug') |
| 56 group.add_option('--debug', action='store_const', const='Debug', | 57 group.add_option('--debug', action='store_const', const='Debug', |
| (...skipping 30 matching lines...) | |
| 87 help=('Run the test scripts in platform mode, which ' | 88 help=('Run the test scripts in platform mode, which ' |
| 88 'conceptually separates the test runner from the ' | 89 'conceptually separates the test runner from the ' |
| 89 '"device" (local or remote, real or emulated) on ' | 90 '"device" (local or remote, real or emulated) on ' |
| 90 'which the tests are running. [experimental]')) | 91 'which the tests are running. [experimental]')) |
| 91 group.add_option('-e', '--environment', default='local', | 92 group.add_option('-e', '--environment', default='local', |
| 92 help=('Test environment to run in. Must be one of: %s' % | 93 help=('Test environment to run in. Must be one of: %s' % |
| 93 ', '.join(constants.VALID_ENVIRONMENTS))) | 94 ', '.join(constants.VALID_ENVIRONMENTS))) |
| 94 group.add_option('--adb-path', | 95 group.add_option('--adb-path', |
| 95 help=('Specify the absolute path of the adb binary that ' | 96 help=('Specify the absolute path of the adb binary that ' |
| 96 'should be used.')) | 97 'should be used.')) |
| | 98 group.add_option('--json-results-file', dest='json_results_file', |
| | 99 help='If set, will dump results in JSON format ' |
| | 100 'to specified file.') |
| 97 option_parser.add_option_group(group) | 101 option_parser.add_option_group(group) |
| 98 | 102 |
| 99 | 103 |
| 100 def ProcessCommonOptions(options, error_func): | 104 def ProcessCommonOptions(options, error_func): |
| 101 """Processes and handles all common options.""" | 105 """Processes and handles all common options.""" |
| 102 run_tests_helper.SetLogLevel(options.verbose_count) | 106 run_tests_helper.SetLogLevel(options.verbose_count) |
| 103 constants.SetBuildType(options.build_type) | 107 constants.SetBuildType(options.build_type) |
| 104 if options.build_directory: | 108 if options.build_directory: |
| 105 constants.SetBuildDirectory(options.build_directory) | 109 constants.SetBuildDirectory(options.build_directory) |
| 106 if options.output_directory: | 110 if options.output_directory: |
| (...skipping 47 matching lines...) | |
| 154 help='Additional arguments to pass to the test.') | 158 help='Additional arguments to pass to the test.') |
| 155 option_parser.add_option('-t', dest='timeout', | 159 option_parser.add_option('-t', dest='timeout', |
| 156 help='Timeout to wait for each test', | 160 help='Timeout to wait for each test', |
| 157 type='int', | 161 type='int', |
| 158 default=60) | 162 default=60) |
| 159 option_parser.add_option('--isolate_file_path', | 163 option_parser.add_option('--isolate_file_path', |
| 160 '--isolate-file-path', | 164 '--isolate-file-path', |
| 161 dest='isolate_file_path', | 165 dest='isolate_file_path', |
| 162 help='.isolate file path to override the default ' | 166 help='.isolate file path to override the default ' |
| 163 'path') | 167 'path') |
| 164 # TODO(gkanwar): Move these to Common Options once we have the plumbing | 168 |
| 165 # in our other test types to handle these commands | |
| 166 AddCommonOptions(option_parser) | 169 AddCommonOptions(option_parser) |
| 167 AddDeviceOptions(option_parser) | 170 AddDeviceOptions(option_parser) |
| 168 | 171 |
| 169 | 172 |
| 170 def AddLinkerTestOptions(option_parser): | 173 def AddLinkerTestOptions(option_parser): |
| 171 option_parser.usage = '%prog linker' | 174 option_parser.usage = '%prog linker' |
| 172 option_parser.commands_dict = {} | 175 option_parser.commands_dict = {} |
| 173 option_parser.example = '%prog linker' | 176 option_parser.example = '%prog linker' |
| 174 | 177 |
| 175 option_parser.add_option('-f', '--gtest-filter', dest='test_filter', | 178 option_parser.add_option('-f', '--gtest-filter', dest='test_filter', |
| (...skipping 455 matching lines...) | |
| 631 | 634 |
| 632 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: | 635 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: |
| 633 exit_code = test_exit_code | 636 exit_code = test_exit_code |
| 634 | 637 |
| 635 report_results.LogFull( | 638 report_results.LogFull( |
| 636 results=results, | 639 results=results, |
| 637 test_type='Unit test', | 640 test_type='Unit test', |
| 638 test_package=suite_name, | 641 test_package=suite_name, |
| 639 flakiness_server=options.flakiness_dashboard_server) | 642 flakiness_server=options.flakiness_dashboard_server) |
| 640 | 643 |
| | 644 if options.json_results_file: |
| | 645 json_results.GenerateJsonResultsFile(results, options.json_results_file) |
| | 646 |
| 641 if os.path.isdir(constants.ISOLATE_DEPS_DIR): | 647 if os.path.isdir(constants.ISOLATE_DEPS_DIR): |
| 642 shutil.rmtree(constants.ISOLATE_DEPS_DIR) | 648 shutil.rmtree(constants.ISOLATE_DEPS_DIR) |
| 643 | 649 |
| 644 return exit_code | 650 return exit_code |
| 645 | 651 |
| 646 | 652 |
| 647 def _RunLinkerTests(options, devices): | 653 def _RunLinkerTests(options, devices): |
| 648 """Subcommand of RunTestsCommands which runs linker tests.""" | 654 """Subcommand of RunTestsCommands which runs linker tests.""" |
| 649 runner_factory, tests = linker_setup.Setup(options, devices) | 655 runner_factory, tests = linker_setup.Setup(options, devices) |
| 650 | 656 |
| 651 results, exit_code = test_dispatcher.RunTests( | 657 results, exit_code = test_dispatcher.RunTests( |
| 652 tests, runner_factory, devices, shard=True, test_timeout=60, | 658 tests, runner_factory, devices, shard=True, test_timeout=60, |
| 653 num_retries=options.num_retries) | 659 num_retries=options.num_retries) |
| 654 | 660 |
| 655 report_results.LogFull( | 661 report_results.LogFull( |
| 656 results=results, | 662 results=results, |
| 657 test_type='Linker test', | 663 test_type='Linker test', |
| 658 test_package='ChromiumLinkerTest') | 664 test_package='ChromiumLinkerTest') |
| 659 | 665 |
| | 666 if options.json_results_file: |
| | 667 json_results.GenerateJsonResultsFile(results, options.json_results_file) |
| | 668 |
| 660 return exit_code | 669 return exit_code |
| 661 | 670 |
| 662 | 671 |
| 663 def _RunInstrumentationTests(options, error_func, devices): | 672 def _RunInstrumentationTests(options, error_func, devices): |
| 664 """Subcommand of RunTestsCommands which runs instrumentation tests.""" | 673 """Subcommand of RunTestsCommands which runs instrumentation tests.""" |
| 665 instrumentation_options = ProcessInstrumentationOptions(options, error_func) | 674 instrumentation_options = ProcessInstrumentationOptions(options, error_func) |
| 666 | 675 |
| 667 if len(devices) > 1 and options.wait_for_debugger: | 676 if len(devices) > 1 and options.wait_for_debugger: |
| 668 logging.warning('Debugger can not be sharded, using first available device') | 677 logging.warning('Debugger can not be sharded, using first available device') |
| 669 devices = devices[:1] | 678 devices = devices[:1] |
| (...skipping 31 matching lines...) | |
| 701 options.device_flags = os.path.join(constants.DIR_SOURCE_ROOT, | 710 options.device_flags = os.path.join(constants.DIR_SOURCE_ROOT, |
| 702 options.device_flags) | 711 options.device_flags) |
| 703 | 712 |
| 704 report_results.LogFull( | 713 report_results.LogFull( |
| 705 results=results, | 714 results=results, |
| 706 test_type='Instrumentation', | 715 test_type='Instrumentation', |
| 707 test_package=os.path.basename(options.test_apk), | 716 test_package=os.path.basename(options.test_apk), |
| 708 annotation=options.annotations, | 717 annotation=options.annotations, |
| 709 flakiness_server=options.flakiness_dashboard_server) | 718 flakiness_server=options.flakiness_dashboard_server) |
| 710 | 719 |
| | 720 if options.json_results_file: |
| | 721 json_results.GenerateJsonResultsFile(results, options.json_results_file) |
| | 722 |
| 711 return exit_code | 723 return exit_code |
| 712 | 724 |
| 713 | 725 |
| 714 def _RunUIAutomatorTests(options, error_func, devices): | 726 def _RunUIAutomatorTests(options, error_func, devices): |
| 715 """Subcommand of RunTestsCommands which runs uiautomator tests.""" | 727 """Subcommand of RunTestsCommands which runs uiautomator tests.""" |
| 716 uiautomator_options = ProcessUIAutomatorOptions(options, error_func) | 728 uiautomator_options = ProcessUIAutomatorOptions(options, error_func) |
| 717 | 729 |
| 718 runner_factory, tests = uiautomator_setup.Setup(uiautomator_options) | 730 runner_factory, tests = uiautomator_setup.Setup(uiautomator_options) |
| 719 | 731 |
| 720 results, exit_code = test_dispatcher.RunTests( | 732 results, exit_code = test_dispatcher.RunTests( |
| 721 tests, runner_factory, devices, shard=True, test_timeout=None, | 733 tests, runner_factory, devices, shard=True, test_timeout=None, |
| 722 num_retries=options.num_retries) | 734 num_retries=options.num_retries) |
| 723 | 735 |
| 724 report_results.LogFull( | 736 report_results.LogFull( |
| 725 results=results, | 737 results=results, |
| 726 test_type='UIAutomator', | 738 test_type='UIAutomator', |
| 727 test_package=os.path.basename(options.test_jar), | 739 test_package=os.path.basename(options.test_jar), |
| 728 annotation=options.annotations, | 740 annotation=options.annotations, |
| 729 flakiness_server=options.flakiness_dashboard_server) | 741 flakiness_server=options.flakiness_dashboard_server) |
| 730 | 742 |
| | 743 if options.json_results_file: |
| | 744 json_results.GenerateJsonResultsFile(results, options.json_results_file) |
| | 745 |
| 731 return exit_code | 746 return exit_code |
| 732 | 747 |
| 733 | 748 |
| 734 def _RunJUnitTests(options, error_func): | 749 def _RunJUnitTests(options, error_func): |
| 735 """Subcommand of RunTestsCommand which runs junit tests.""" | 750 """Subcommand of RunTestsCommand which runs junit tests.""" |
| 736 junit_options = ProcessJUnitTestOptions(options, error_func) | 751 junit_options = ProcessJUnitTestOptions(options, error_func) |
| 737 runner_factory, tests = junit_setup.Setup(junit_options) | 752 runner_factory, tests = junit_setup.Setup(junit_options) |
| 738 _, exit_code = junit_dispatcher.RunTests(tests, runner_factory) | 753 _, exit_code = junit_dispatcher.RunTests(tests, runner_factory) |
| 739 | 754 |
| 740 return exit_code | 755 return exit_code |
| 741 | 756 |
| 742 | 757 |
| 743 def _RunMonkeyTests(options, error_func, devices): | 758 def _RunMonkeyTests(options, error_func, devices): |
| 744 """Subcommand of RunTestsCommands which runs monkey tests.""" | 759 """Subcommand of RunTestsCommands which runs monkey tests.""" |
| 745 monkey_options = ProcessMonkeyTestOptions(options, error_func) | 760 monkey_options = ProcessMonkeyTestOptions(options, error_func) |
| 746 | 761 |
| 747 runner_factory, tests = monkey_setup.Setup(monkey_options) | 762 runner_factory, tests = monkey_setup.Setup(monkey_options) |
| 748 | 763 |
| 749 results, exit_code = test_dispatcher.RunTests( | 764 results, exit_code = test_dispatcher.RunTests( |
| 750 tests, runner_factory, devices, shard=False, test_timeout=None, | 765 tests, runner_factory, devices, shard=False, test_timeout=None, |
| 751 num_retries=options.num_retries) | 766 num_retries=options.num_retries) |
| 752 | 767 |
| 753 report_results.LogFull( | 768 report_results.LogFull( |
| 754 results=results, | 769 results=results, |
| 755 test_type='Monkey', | 770 test_type='Monkey', |
| 756 test_package='Monkey') | 771 test_package='Monkey') |
| 757 | 772 |
| | 773 if options.json_results_file: |
| | 774 json_results.GenerateJsonResultsFile(results, options.json_results_file) |
| | 775 |
| 758 return exit_code | 776 return exit_code |
| 759 | 777 |
| 760 | 778 |
| 761 def _RunPerfTests(options, args, error_func): | 779 def _RunPerfTests(options, args, error_func): |
| 762 """Subcommand of RunTestsCommands which runs perf tests.""" | 780 """Subcommand of RunTestsCommands which runs perf tests.""" |
| 763 perf_options = ProcessPerfTestOptions(options, args, error_func) | 781 perf_options = ProcessPerfTestOptions(options, args, error_func) |
| 764 | 782 |
| 765 # Just save a simple json with a list of test names. | 783 # Just save a simple json with a list of test names. |
| 766 if perf_options.output_json_list: | 784 if perf_options.output_json_list: |
| 767 return perf_test_runner.OutputJsonList( | 785 return perf_test_runner.OutputJsonList( |
| (...skipping 11 matching lines...) | |
| 779 # which increases throughput but have no affinity. | 797 # which increases throughput but have no affinity. |
| 780 results, _ = test_dispatcher.RunTests( | 798 results, _ = test_dispatcher.RunTests( |
| 781 tests, runner_factory, devices, shard=False, test_timeout=None, | 799 tests, runner_factory, devices, shard=False, test_timeout=None, |
| 782 num_retries=options.num_retries) | 800 num_retries=options.num_retries) |
| 783 | 801 |
| 784 report_results.LogFull( | 802 report_results.LogFull( |
| 785 results=results, | 803 results=results, |
| 786 test_type='Perf', | 804 test_type='Perf', |
| 787 test_package='Perf') | 805 test_package='Perf') |
| 788 | 806 |
| | 807 if options.json_results_file: |
| | 808 json_results.GenerateJsonResultsFile(results, options.json_results_file) |
| | 809 |
| 789 if perf_options.single_step: | 810 if perf_options.single_step: |
| 790 return perf_test_runner.PrintTestOutput('single_step') | 811 return perf_test_runner.PrintTestOutput('single_step') |
| 791 | 812 |
| 792 perf_test_runner.PrintSummary(tests) | 813 perf_test_runner.PrintSummary(tests) |
| 793 | 814 |
| 794 # Always return 0 on the sharding stage. Individual tests exit_code | 815 # Always return 0 on the sharding stage. Individual tests exit_code |
| 795 # will be returned on the print_step stage. | 816 # will be returned on the print_step stage. |
| 796 return 0 | 817 return 0 |
| 797 | 818 |
| 798 | 819 |
| (...skipping 120 matching lines...) | |
| 919 options, env, test, option_parser.error) as test_run: | 940 options, env, test, option_parser.error) as test_run: |
| 920 results = test_run.RunTests() | 941 results = test_run.RunTests() |
| 921 | 942 |
| 922 report_results.LogFull( | 943 report_results.LogFull( |
| 923 results=results, | 944 results=results, |
| 924 test_type=test.TestType(), | 945 test_type=test.TestType(), |
| 925 test_package=test_run.TestPackage(), | 946 test_package=test_run.TestPackage(), |
| 926 annotation=options.annotations, | 947 annotation=options.annotations, |
| 927 flakiness_server=options.flakiness_dashboard_server) | 948 flakiness_server=options.flakiness_dashboard_server) |
| 928 | 949 |
| | 950 if options.json_results_file: |
| | 951 json_results.GenerateJsonResultsFile( |
| | 952 results, options.json_results_file) |
| | 953 |
| 929 return results | 954 return results |
| 930 | 955 |
| 931 | 956 |
| 932 def HelpCommand(command, _options, args, option_parser): | 957 def HelpCommand(command, _options, args, option_parser): |
| 933 """Display help for a certain command, or overall help. | 958 """Display help for a certain command, or overall help. |
| 934 | 959 |
| 935 Args: | 960 Args: |
| 936 command: String indicating the command that was received to trigger | 961 command: String indicating the command that was received to trigger |
| 937 this function. | 962 this function. |
| 938 options: optparse options dictionary. unused. | 963 options: optparse options dictionary. unused. |
| (...skipping 63 matching lines...) | |
| 1002 | 1027 |
| 1003 def main(): | 1028 def main(): |
| 1004 signal.signal(signal.SIGUSR1, DumpThreadStacks) | 1029 signal.signal(signal.SIGUSR1, DumpThreadStacks) |
| 1005 option_parser = command_option_parser.CommandOptionParser( | 1030 option_parser = command_option_parser.CommandOptionParser( |
| 1006 commands_dict=VALID_COMMANDS) | 1031 commands_dict=VALID_COMMANDS) |
| 1007 return command_option_parser.ParseAndExecute(option_parser) | 1032 return command_option_parser.ParseAndExecute(option_parser) |
| 1008 | 1033 |
| 1009 | 1034 |
| 1010 if __name__ == '__main__': | 1035 if __name__ == '__main__': |
| 1011 sys.exit(main()) | 1036 sys.exit(main()) |
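
The patch above threads a new `--json-results-file` option through `AddCommonOptions` and, after each suite finishes, dumps its results via `json_results.GenerateJsonResultsFile(results, options.json_results_file)`. The `pylib.results.json_results` module itself is not part of this diff, so the sketch below only illustrates the kind of serialization that call performs; the results interface (`GetAll`/`GetPass`/`GetNotPass`/`GetName`, mirroring pylib's `base_test_result` classes) and the output fields are assumptions for illustration, not the real implementation.

```python
# Minimal, self-contained sketch of dumping test results to a JSON file.
# NOT the actual pylib.results.json_results module; all class and field
# names below are assumptions made for this illustration.
import json


class _FakeResult(object):
  """Stand-in for a single test result (assumed API: GetName/Passed)."""

  def __init__(self, name, passed):
    self._name = name
    self._passed = passed

  def GetName(self):
    return self._name

  def Passed(self):
    return self._passed


class _FakeResults(object):
  """Stand-in for a test-run results collection (assumed API)."""

  def __init__(self, results):
    self._results = list(results)

  def GetAll(self):
    return list(self._results)

  def GetPass(self):
    return [r for r in self._results if r.Passed()]

  def GetNotPass(self):
    return [r for r in self._results if not r.Passed()]


def GenerateJsonResultsFile(results, path):
  """Writes a simple JSON summary of |results| to |path| (illustrative only)."""
  summary = {
      'total': len(results.GetAll()),
      'passed': sorted(r.GetName() for r in results.GetPass()),
      'failed': sorted(r.GetName() for r in results.GetNotPass()),
  }
  with open(path, 'w') as f:
    json.dump(summary, f, indent=2)


if __name__ == '__main__':
  fake = _FakeResults([_FakeResult('FooTest.testBar', True),
                       _FakeResult('FooTest.testBaz', False)])
  GenerateJsonResultsFile(fake, 'results.json')
```

Because the dump is gated on `if options.json_results_file:` in every subcommand, default behavior is unchanged; an invocation along the lines of `build/android/test_runner.py gtest -s base_unittests --json-results-file=out.json` would additionally leave a machine-readable summary next to the usual logged output.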