Chromium Code Reviews

Unified Diff: build/android/test_runner.py

Issue 1376483002: [Android] Add --repeat for gtests and instrumentation tests. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 2 months ago
 #!/usr/bin/env python
 #
 # Copyright 2013 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Runs all types of tests from one unified interface."""

 import argparse
 import collections
+import itertools
 import logging
 import os
 import signal
 import sys
 import threading
 import unittest

 from devil import base_error
 from devil.android import apk_helper
 from devil.android import device_blacklist
(...skipping 190 matching lines...)
                          'path')
   group.add_argument('--app-data-file', action='append', dest='app_data_files',
                      help='A file path relative to the app data directory '
                           'that should be saved to the host.')
   group.add_argument('--app-data-file-dir',
                      help='Host directory to which app data files will be'
                           ' saved. Used with --app-data-file.')
   group.add_argument('--delete-stale-data', dest='delete_stale_data',
                      action='store_true',
                      help='Delete stale test data on the device.')
+  group.add_argument('--repeat', '--gtest_repeat', '--gtest-repeat',
+                     dest='repeat', type=int, default=0,
+                     help='Number of times to repeat the specified set of '
+                          'tests.')

   filter_group = group.add_mutually_exclusive_group()
   filter_group.add_argument('-f', '--gtest_filter', '--gtest-filter',
                             dest='test_filter',
                             help='googletest-style filter string.')
   filter_group.add_argument('--gtest-filter-file', dest='test_filter_file',
                             help='Path to file that contains googletest-style '
                                  'filter strings. (Lines will be joined with '
                                  '":" to create a single filter string.)')
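Note on the new flag: --repeat, --gtest_repeat and --gtest-repeat are all
registered as option strings of a single argument with dest='repeat', so every
spelling populates args.repeat. A minimal standalone sketch of that argparse
pattern (the bare parser below is hypothetical, not part of this CL):

    import argparse

    parser = argparse.ArgumentParser()
    # argparse treats the extra option strings as aliases of the same option,
    # storing the parsed value under the single destination 'repeat'.
    parser.add_argument('--repeat', '--gtest_repeat', '--gtest-repeat',
                        dest='repeat', type=int, default=0,
                        help='Number of times to repeat the specified set of '
                             'tests.')

    print(parser.parse_args(['--gtest_repeat', '3']).repeat)  # 3
    print(parser.parse_args([]).repeat)                       # 0

The default of 0 preserves the current single-run behavior; for example (suite
and APK names below are only placeholders):

    build/android/test_runner.py gtest -s base_unittests --gtest_repeat 2
    build/android/test_runner.py instrumentation --test-apk FooTest --repeat 5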
(...skipping 10 matching lines...)
   AddDeviceOptions(parser)


 def AddJavaTestOptions(argument_group):
   """Adds the Java test options to |option_parser|."""

   argument_group.add_argument(
       '-f', '--test-filter', dest='test_filter',
       help=('Test filter (if not fully qualified, will run all matches).'))
   argument_group.add_argument(
+      '--repeat', dest='repeat', type=int, default=0,
+      help='Number of times to repeat the specified set of tests.')
+  argument_group.add_argument(
       '-A', '--annotation', dest='annotation_str',
       help=('Comma-separated list of annotations. Run only tests with any of '
             'the given annotations. An annotation can be either a key or a '
             'key-values pair. A test that has no annotation is considered '
             '"SmallTest".'))
   argument_group.add_argument(
       '-E', '--exclude-annotation', dest='exclude_annotation_str',
       help=('Comma-separated list of annotations. Exclude tests with these '
             'annotations.'))
   argument_group.add_argument(
(...skipping 388 matching lines...)
   results, exit_code = test_dispatcher.RunTests(
       tests, runner_factory, devices, shard=True, test_timeout=60,
       num_retries=args.num_retries)

   report_results.LogFull(
       results=results,
       test_type='Linker test',
       test_package='ChromiumLinkerTest')

   if args.json_results_file:
-    json_results.GenerateJsonResultsFile(results, args.json_results_file)
+    json_results.GenerateJsonResultsFile([results], args.json_results_file)

   return exit_code

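Note on the json_results change: GenerateJsonResultsFile now takes a list of
result sets, one per repetition, so single-run call sites such as the one
above wrap their lone TestRunResults in a one-element list. A rough sketch of
the new calling convention, with hypothetical stand-ins for the pylib types
(FakeTestRunResults and generate_json_results_file are stubs, not the real
API):

    import json

    class FakeTestRunResults(object):
      """Stand-in for base_test_result.TestRunResults."""
      def __init__(self, passed):
        self._passed = passed
      def DidRunPass(self):
        return self._passed

    def generate_json_results_file(all_results, file_path):
      """Stand-in for json_results.GenerateJsonResultsFile, which now
      expects a list with one result set per iteration."""
      with open(file_path, 'w') as f:
        json.dump([{'did_run_pass': r.DidRunPass()} for r in all_results], f)

    # A single run is wrapped in a one-element list:
    generate_json_results_file([FakeTestRunResults(True)], 'results.json')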
 def _RunInstrumentationTests(args, devices):
   """Subcommand of RunTestsCommands which runs instrumentation tests."""
   logging.info('_RunInstrumentationTests(%s, %s)', str(args), str(devices))

   instrumentation_options = ProcessInstrumentationOptions(args)

   if len(devices) > 1 and args.wait_for_debugger:
     logging.warning('Debugger can not be sharded, using first available device')
     devices = devices[:1]

   results = base_test_result.TestRunResults()
   exit_code = 0

   if args.run_java_tests:
-    runner_factory, tests = instrumentation_setup.Setup(
+    java_runner_factory, java_tests = instrumentation_setup.Setup(
         instrumentation_options, devices)
-
-    test_results, exit_code = test_dispatcher.RunTests(
-        tests, runner_factory, devices, shard=True, test_timeout=None,
-        num_retries=args.num_retries)
-
-    results.AddTestRunResults(test_results)
+  else:
+    java_runner_factory = None
+    java_tests = None

   if args.run_python_tests:
-    runner_factory, tests = host_driven_setup.InstrumentationSetup(
+    py_runner_factory, py_tests = host_driven_setup.InstrumentationSetup(
         args.host_driven_root, args.official_build,
         instrumentation_options)
+  else:
+    py_runner_factory = None
+    py_tests = None

-    if tests:
+  results = []
+  repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
+                 else itertools.count())
+  for _ in repetitions:
+    iteration_results = base_test_result.TestRunResults()
+    if java_tests:
       test_results, test_exit_code = test_dispatcher.RunTests(
-          tests, runner_factory, devices, shard=True, test_timeout=None,
-          num_retries=args.num_retries)
-
-      results.AddTestRunResults(test_results)
+          java_tests, java_runner_factory, devices, shard=True,
+          test_timeout=None, num_retries=args.num_retries)
+      iteration_results.AddTestRunResults(test_results)

       # Only allow exit code escalation
       if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
         exit_code = test_exit_code

-  if args.device_flags:
-    args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
-                                     args.device_flags)
+    if py_tests:
+      test_results, test_exit_code = test_dispatcher.RunTests(
+          py_tests, py_runner_factory, devices, shard=True, test_timeout=None,
+          num_retries=args.num_retries)
+      iteration_results.AddTestRunResults(test_results)

-  report_results.LogFull(
-      results=results,
-      test_type='Instrumentation',
-      test_package=os.path.basename(args.test_apk),
-      annotation=args.annotations,
-      flakiness_server=args.flakiness_dashboard_server)
+      # Only allow exit code escalation
+      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
+        exit_code = test_exit_code
+
+    results.append(iteration_results)
+    report_results.LogFull(
+        results=iteration_results,
+        test_type='Instrumentation',
+        test_package=os.path.basename(args.test_apk),
+        annotation=args.annotations,
+        flakiness_server=args.flakiness_dashboard_server)

   if args.json_results_file:
     json_results.GenerateJsonResultsFile(results, args.json_results_file)

   return exit_code

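Note on the repetition loop: everything hinges on one expression. A
non-negative --repeat yields args.repeat + 1 finite iterations, while a
negative value selects itertools.count() and repeats until the run is
interrupted. A standalone sketch of the pattern (the helper name is invented;
xrange matches the Python 2 this script targets):

    import itertools

    def repetitions(repeat):
      # repeat >= 0: run the suite once, then repeat it that many more times.
      # repeat < 0: an unbounded iterator, i.e. repeat until killed.
      return xrange(repeat + 1) if repeat >= 0 else itertools.count()

    assert list(repetitions(2)) == [0, 1, 2]  # --repeat 2 -> three iterations
    assert list(repetitions(0)) == [0]        # default: a single run

Each iteration gets a fresh TestRunResults, is logged immediately via
report_results.LogFull, and is appended to the results list that the JSON
reporter consumes at the end.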
 def _RunUIAutomatorTests(args, devices):
   """Subcommand of RunTestsCommands which runs uiautomator tests."""
   uiautomator_options = ProcessUIAutomatorOptions(args)

   runner_factory, tests = uiautomator_setup.Setup(uiautomator_options, devices)

   results, exit_code = test_dispatcher.RunTests(
       tests, runner_factory, devices, shard=True, test_timeout=None,
       num_retries=args.num_retries)

   report_results.LogFull(
       results=results,
       test_type='UIAutomator',
       test_package=os.path.basename(args.test_jar),
       annotation=args.annotations,
       flakiness_server=args.flakiness_dashboard_server)

   if args.json_results_file:
-    json_results.GenerateJsonResultsFile(results, args.json_results_file)
+    json_results.GenerateJsonResultsFile([results], args.json_results_file)

   return exit_code


 def _RunJUnitTests(args):
   """Subcommand of RunTestsCommand which runs junit tests."""
   runner_factory, tests = junit_setup.Setup(args)
   results, exit_code = junit_dispatcher.RunTests(tests, runner_factory)

   report_results.LogFull(
       results=results,
       test_type='JUnit',
       test_package=args.test_suite)

   if args.json_results_file:
-    json_results.GenerateJsonResultsFile(results, args.json_results_file)
+    json_results.GenerateJsonResultsFile([results], args.json_results_file)

   return exit_code


 def _RunMonkeyTests(args, devices):
   """Subcommand of RunTestsCommands which runs monkey tests."""
   monkey_options = ProcessMonkeyTestOptions(args)

   runner_factory, tests = monkey_setup.Setup(monkey_options)

   results, exit_code = test_dispatcher.RunTests(
       tests, runner_factory, devices, shard=False, test_timeout=None,
       num_retries=args.num_retries)

   report_results.LogFull(
       results=results,
       test_type='Monkey',
       test_package='Monkey')

   if args.json_results_file:
-    json_results.GenerateJsonResultsFile(results, args.json_results_file)
+    json_results.GenerateJsonResultsFile([results], args.json_results_file)

   return exit_code


 def _RunPerfTests(args, active_devices):
   """Subcommand of RunTestsCommands which runs perf tests."""
   perf_options = ProcessPerfTestOptions(args)

   # Just save a simple json with a list of test names.
   if perf_options.output_json_list:
(...skipping 15 matching lines...)
   results, _ = test_dispatcher.RunTests(
       tests, runner_factory, devices, shard=False, test_timeout=None,
       num_retries=args.num_retries)

   report_results.LogFull(
       results=results,
       test_type='Perf',
       test_package='Perf')

   if args.json_results_file:
-    json_results.GenerateJsonResultsFile(results, args.json_results_file)
+    json_results.GenerateJsonResultsFile([results], args.json_results_file)

   if perf_options.single_step:
     return perf_test_runner.PrintTestOutput('single_step')

   perf_test_runner.PrintSummary(tests)

   # Always return 0 on the sharding stage. Individual tests exit_code
   # will be returned on the print_step stage.
   return 0

(...skipping 106 matching lines...)
   def infra_error(message):
     parser.exit(status=constants.INFRA_EXIT_CODE, message=message)

   if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
     infra_error('%s is not yet supported in platform mode' % args.command)

   with environment_factory.CreateEnvironment(args, infra_error) as env:
     with test_instance_factory.CreateTestInstance(args, infra_error) as test:
       with test_run_factory.CreateTestRun(
           args, env, test, infra_error) as test_run:
-        results = test_run.RunTests()
+        results = []
+        repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
+                       else itertools.count())
+        for _ in repetitions:
+          iteration_results = test_run.RunTests()
+          results.append(iteration_results)

-        if args.environment == 'remote_device' and args.trigger:
-          return 0  # Not returning results, only triggering.
-
-        report_results.LogFull(
-            results=results,
-            test_type=test.TestType(),
-            test_package=test_run.TestPackage(),
-            annotation=getattr(args, 'annotations', None),
-            flakiness_server=getattr(args, 'flakiness_dashboard_server', None))
+          if iteration_results is not None:
+            report_results.LogFull(
+                results=iteration_results,
+                test_type=test.TestType(),
+                test_package=test_run.TestPackage(),
+                annotation=getattr(args, 'annotations', None),
+                flakiness_server=getattr(args, 'flakiness_dashboard_server',
+                                         None))

         if args.json_results_file:
           json_results.GenerateJsonResultsFile(
               results, args.json_results_file)

-        return 0 if results.DidRunPass() else constants.ERROR_EXIT_CODE
+        return (0 if all(r.DidRunPass() for r in results)
+                else constants.ERROR_EXIT_CODE)

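Note on the aggregated exit code: in platform mode the command now succeeds
only if every repetition passed. A miniature of that check (Run and
ERROR_EXIT_CODE are hypothetical stand-ins for the pylib result type and
constants.ERROR_EXIT_CODE):

    ERROR_EXIT_CODE = 1  # stand-in for constants.ERROR_EXIT_CODE

    class Run(object):
      """Stand-in for one iteration's results object."""
      def __init__(self, passed):
        self._passed = passed
      def DidRunPass(self):
        return self._passed

    results = [Run(True), Run(True), Run(False)]
    exit_code = (0 if all(r.DidRunPass() for r in results)
                 else ERROR_EXIT_CODE)
    assert exit_code == ERROR_EXIT_CODE  # one failing repetition fails the run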

 CommandConfigTuple = collections.namedtuple(
     'CommandConfigTuple',
     ['add_options_func', 'help_txt'])
 VALID_COMMANDS = {
     'gtest': CommandConfigTuple(
         AddGTestOptions,
         'googletest-based C++ tests'),
     'instrumentation': CommandConfigTuple(
(...skipping 50 matching lines...)
     if e.is_infra_error:
       return constants.INFRA_EXIT_CODE
     return constants.ERROR_EXIT_CODE
   except:  # pylint: disable=W0702
     logging.exception('Unrecognized error occurred.')
     return constants.ERROR_EXIT_CODE


 if __name__ == '__main__':
   sys.exit(main())