| OLD | NEW |
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # | 2 # |
| 3 # Copyright 2013 The Chromium Authors. All rights reserved. | 3 # Copyright 2013 The Chromium Authors. All rights reserved. |
| 4 # Use of this source code is governed by a BSD-style license that can be | 4 # Use of this source code is governed by a BSD-style license that can be |
| 5 # found in the LICENSE file. | 5 # found in the LICENSE file. |
| 6 | 6 |
| 7 """Runs all types of tests from one unified interface.""" | 7 """Runs all types of tests from one unified interface.""" |
| 8 | 8 |
| 9 import collections | 9 import collections |
| 10 import logging | 10 import logging |
| (...skipping 615 matching lines...) | |
| 626 num_retries=options.num_retries) | 626 num_retries=options.num_retries) |
| 627 | 627 |
| 628 report_results.LogFull( | 628 report_results.LogFull( |
| 629 results=results, | 629 results=results, |
| 630 test_type='Monkey', | 630 test_type='Monkey', |
| 631 test_package='Monkey') | 631 test_package='Monkey') |
| 632 | 632 |
| 633 return exit_code | 633 return exit_code |
| 634 | 634 |
| 635 | 635 |
| 636 def _RunPerfTests(options, args, error_func, devices): | 636 def _RunPerfTests(options, args, error_func): |
| 637 """Subcommand of RunTestsCommands which runs perf tests.""" | 637 """Subcommand of RunTestsCommands which runs perf tests.""" |
| 638 perf_options = ProcessPerfTestOptions(options, args, error_func) | 638 perf_options = ProcessPerfTestOptions(options, args, error_func) |
| 639 # Just print the results from a single previously executed step. | 639 # Just print the results from a single previously executed step. |
| 640 if perf_options.print_step: | 640 if perf_options.print_step: |
| 641 return perf_test_runner.PrintTestOutput(perf_options.print_step) | 641 return perf_test_runner.PrintTestOutput(perf_options.print_step) |
| 642 | 642 |
| 643 runner_factory, tests = perf_setup.Setup(perf_options) | 643 runner_factory, tests, devices = perf_setup.Setup(perf_options) |
| 644 | 644 |
|  | 645 # shard=False means that each device will get the full list of tests |
|  | 646 # and then each one will decide its own affinity. |
|  | 647 # shard=True means each device will pop the next test available from a queue, |
|  | 648 # which increases throughput but has no affinity. |
| 645 results, _ = test_dispatcher.RunTests( | 649 results, _ = test_dispatcher.RunTests( |
| 646 tests, runner_factory, devices, shard=True, test_timeout=None, | 650 tests, runner_factory, devices, shard=False, test_timeout=None, |
| 647 num_retries=options.num_retries) | 651 num_retries=options.num_retries) |
| 648 | 652 |
| 649 report_results.LogFull( | 653 report_results.LogFull( |
| 650 results=results, | 654 results=results, |
| 651 test_type='Perf', | 655 test_type='Perf', |
| 652 test_package='Perf') | 656 test_package='Perf') |
| 653 | 657 |
| 654 if perf_options.single_step: | 658 if perf_options.single_step: |
| 655 return perf_test_runner.PrintTestOutput('single_step') | 659 return perf_test_runner.PrintTestOutput('single_step') |
| 656 | 660 |
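The new comment explains the trade-off the perf runner is making: with `shard=False` every attached device receives the full test list and runs only the tests whose affinity matches it, while `shard=True` feeds all devices from one shared queue for throughput at the cost of affinity. As a rough, hypothetical sketch of that distinction (not the actual `test_dispatcher.RunTests` implementation; the helper names and the `affinity_of` parameter are invented for illustration):

```python
# Hypothetical sketch of the two dispatch modes described in the comment above.
# The real logic lives in pylib's test_dispatcher; names here are illustrative.
import Queue      # Python 2 module, matching the era of this script
import threading


def RunSharded(tests, devices, run_on_device):
  """shard=True: devices pop tests from one shared queue (no affinity)."""
  work = Queue.Queue()
  for test in tests:
    work.put(test)

  def Worker(device):
    while True:
      try:
        test = work.get_nowait()
      except Queue.Empty:
        return
      run_on_device(device, test)

  threads = [threading.Thread(target=Worker, args=(d,)) for d in devices]
  for t in threads:
    t.start()
  for t in threads:
    t.join()


def RunReplicated(tests, devices, run_on_device, affinity_of):
  """shard=False: every device sees the full list, keeps its affine tests."""
  def Worker(device):
    for test in tests:
      if affinity_of(test) == device:  # each device decides its own affinity
        run_on_device(device, test)

  threads = [threading.Thread(target=Worker, args=(d,)) for d in devices]
  for t in threads:
    t.start()
  for t in threads:
    t.join()
```

Perf tests switch to `shard=False` here presumably because pinning a test to a particular device keeps its timings comparable from run to run, which matters more than raw throughput for benchmarks.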
| (...skipping 67 matching lines...) | |
| 724 return _RunGTests(options, devices) | 728 return _RunGTests(options, devices) |
| 725 elif command == 'linker': | 729 elif command == 'linker': |
| 726 return _RunLinkerTests(options, devices) | 730 return _RunLinkerTests(options, devices) |
| 727 elif command == 'instrumentation': | 731 elif command == 'instrumentation': |
| 728 return _RunInstrumentationTests(options, option_parser.error, devices) | 732 return _RunInstrumentationTests(options, option_parser.error, devices) |
| 729 elif command == 'uiautomator': | 733 elif command == 'uiautomator': |
| 730 return _RunUIAutomatorTests(options, option_parser.error, devices) | 734 return _RunUIAutomatorTests(options, option_parser.error, devices) |
| 731 elif command == 'monkey': | 735 elif command == 'monkey': |
| 732 return _RunMonkeyTests(options, option_parser.error, devices) | 736 return _RunMonkeyTests(options, option_parser.error, devices) |
| 733 elif command == 'perf': | 737 elif command == 'perf': |
| 734 return _RunPerfTests(options, args, option_parser.error, devices) | 738 return _RunPerfTests(options, args, option_parser.error) |
| 735 else: | 739 else: |
| 736 raise Exception('Unknown test type.') | 740 raise Exception('Unknown test type.') |
| 737 | 741 |
| 738 | 742 |
| 739 def HelpCommand(command, _options, args, option_parser): | 743 def HelpCommand(command, _options, args, option_parser): |
| 740 """Display help for a certain command, or overall help. | 744 """Display help for a certain command, or overall help. |
| 741 | 745 |
| 742 Args: | 746 Args: |
| 743 command: String indicating the command that was received to trigger | 747 command: String indicating the command that was received to trigger |
| 744 this function. | 748 this function. |
| (...skipping 60 matching lines...) | |
| 805 | 809 |
| 806 def main(): | 810 def main(): |
| 807 signal.signal(signal.SIGUSR1, DumpThreadStacks) | 811 signal.signal(signal.SIGUSR1, DumpThreadStacks) |
| 808 option_parser = command_option_parser.CommandOptionParser( | 812 option_parser = command_option_parser.CommandOptionParser( |
| 809 commands_dict=VALID_COMMANDS) | 813 commands_dict=VALID_COMMANDS) |
| 810 return command_option_parser.ParseAndExecute(option_parser) | 814 return command_option_parser.ParseAndExecute(option_parser) |
| 811 | 815 |
| 812 | 816 |
| 813 if __name__ == '__main__': | 817 if __name__ == '__main__': |
| 814 sys.exit(main()) | 818 sys.exit(main()) |
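`main()` wires SIGUSR1 to `DumpThreadStacks`, which is defined earlier in the script (outside this excerpt). A minimal sketch of what such a handler generally looks like, assuming it simply logs every thread's current Python stack:

```python
# Minimal sketch of a SIGUSR1 stack-dump handler; the script's actual
# DumpThreadStacks may differ in detail, but the general shape is the same.
import logging
import signal
import sys
import threading
import traceback


def DumpThreadStacks(_signal, _frame):
  frames = sys._current_frames()  # maps thread id -> current frame
  for thread in threading.enumerate():
    frame = frames.get(thread.ident)
    if frame is None:
      continue
    logging.critical('Stack dump for thread %r:\n%s',
                     thread.name, ''.join(traceback.format_stack(frame)))


signal.signal(signal.SIGUSR1, DumpThreadStacks)
```

With that hook installed, `kill -USR1 <pid>` makes a hung test run report where every thread is stuck without killing the process.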