Chromium Code Reviews

Unified Diff: build/android/test_runner.py

Issue 1376483002: [Android] Add --repeat for gtests and instrumentation tests. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: infinite repetition Created 5 years, 2 months ago
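
For context, some hypothetical invocations of the new flag (the suite and APK names below are placeholders, not part of this CL). Per the help text in the diff, --repeat N runs the selected tests N additional times, and a negative value repeats until interrupted:

    # gtest: --gtest_repeat and --gtest-repeat are accepted as aliases.
    build/android/test_runner.py gtest -s base_unittests --repeat 2

    # instrumentation: a negative repeat count loops indefinitely.
    build/android/test_runner.py instrumentation --test-apk MyTestApk --repeat -1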
 #!/usr/bin/env python
 #
 # Copyright 2013 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Runs all types of tests from one unified interface."""

 import argparse
 import collections
+import itertools
 import logging
 import os
 import signal
 import sys
 import threading
 import unittest

 from devil import base_error
 from devil.android import apk_helper
 from devil.android import device_blacklist
(...skipping 190 matching lines...)
                           'path')
   group.add_argument('--app-data-file', action='append', dest='app_data_files',
                      help='A file path relative to the app data directory '
                           'that should be saved to the host.')
   group.add_argument('--app-data-file-dir',
                      help='Host directory to which app data files will be'
                           ' saved. Used with --app-data-file.')
   group.add_argument('--delete-stale-data', dest='delete_stale_data',
                      action='store_true',
                      help='Delete stale test data on the device.')
+  group.add_argument('--repeat', '--gtest_repeat', '--gtest-repeat',
+                     dest='repeat', type=int, default=0,
+                     help='Number of times to repeat the specified set of '
+                          'tests.')

   filter_group = group.add_mutually_exclusive_group()
   filter_group.add_argument('-f', '--gtest_filter', '--gtest-filter',
                             dest='test_filter',
                             help='googletest-style filter string.')
   filter_group.add_argument('--gtest-filter-file', dest='test_filter_file',
                             help='Path to file that contains googletest-style '
                                  'filter strings. (Lines will be joined with '
                                  '":" to create a single filter string.)')

(...skipping 10 matching lines...)
   AddDeviceOptions(parser)


 def AddJavaTestOptions(argument_group):
   """Adds the Java test options to |option_parser|."""

   argument_group.add_argument(
       '-f', '--test-filter', dest='test_filter',
       help=('Test filter (if not fully qualified, will run all matches).'))
   argument_group.add_argument(
+      '--repeat', dest='repeat', type=int, default=0,
+      help='Number of times to repeat the specified set of tests.')
+  argument_group.add_argument(
       '-A', '--annotation', dest='annotation_str',
       help=('Comma-separated list of annotations. Run only tests with any of '
             'the given annotations. An annotation can be either a key or a '
             'key-values pair. A test that has no annotation is considered '
             '"SmallTest".'))
   argument_group.add_argument(
       '-E', '--exclude-annotation', dest='exclude_annotation_str',
       help=('Comma-separated list of annotations. Exclude tests with these '
             'annotations.'))
   argument_group.add_argument(
(...skipping 388 matching lines...)
   results, exit_code = test_dispatcher.RunTests(
       tests, runner_factory, devices, shard=True, test_timeout=60,
       num_retries=args.num_retries)

   report_results.LogFull(
       results=results,
       test_type='Linker test',
       test_package='ChromiumLinkerTest')

   if args.json_results_file:
-    json_results.GenerateJsonResultsFile(results, args.json_results_file)
+    json_results.GenerateJsonResultsFile([results], args.json_results_file)

   return exit_code


 def _RunInstrumentationTests(args, devices):
   """Subcommand of RunTestsCommands which runs instrumentation tests."""
   logging.info('_RunInstrumentationTests(%s, %s)', str(args), str(devices))

   instrumentation_options = ProcessInstrumentationOptions(args)

   if len(devices) > 1 and args.wait_for_debugger:
     logging.warning('Debugger can not be sharded, using first available device')
     devices = devices[:1]

-  results = base_test_result.TestRunResults()
   exit_code = 0
mikecase (-- gone --) 2015/09/28 14:00:12 nit: I don't think this exit_code var is really
jbudorick 2015/09/28 14:03:21 It gets modified in the body of that if statement,
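
For readers skimming the thread: a minimal standalone sketch of the escalation pattern under discussion. The _EscalateExitCode helper and the literal error code are illustrative stand-ins for the inline check and constants.ERROR_EXIT_CODE in the patch:

    ERROR_EXIT_CODE = 1  # stand-in for constants.ERROR_EXIT_CODE

    def _EscalateExitCode(exit_code, test_exit_code):
      # Escalate only: record a nonzero per-iteration code, but once
      # ERROR_EXIT_CODE is set, later iterations can never downgrade it.
      if test_exit_code and exit_code != ERROR_EXIT_CODE:
        return test_exit_code
      return exit_code

    exit_code = 0
    for test_exit_code in (0, 2, 0):
      exit_code = _EscalateExitCode(exit_code, test_exit_code)
    assert exit_code == 2  # a later passing iteration does not reset it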

   if args.run_java_tests:
-    runner_factory, tests = instrumentation_setup.Setup(
+    java_runner_factory, java_tests = instrumentation_setup.Setup(
         instrumentation_options, devices)
-
-    test_results, exit_code = test_dispatcher.RunTests(
-        tests, runner_factory, devices, shard=True, test_timeout=None,
-        num_retries=args.num_retries)
-
-    results.AddTestRunResults(test_results)
+  else:
+    java_runner_factory = None
+    java_tests = None

   if args.run_python_tests:
-    runner_factory, tests = host_driven_setup.InstrumentationSetup(
+    py_runner_factory, py_tests = host_driven_setup.InstrumentationSetup(
         args.host_driven_root, args.official_build,
         instrumentation_options)
+  else:
+    py_runner_factory = None
+    py_tests = None

-    if tests:
+  results = []
+  repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
+                 else itertools.count())
+  for _ in repetitions:
+    iteration_results = base_test_result.TestRunResults()
+    if java_tests:
       test_results, test_exit_code = test_dispatcher.RunTests(
-          tests, runner_factory, devices, shard=True, test_timeout=None,
-          num_retries=args.num_retries)
-
-      results.AddTestRunResults(test_results)
+          java_tests, java_runner_factory, devices, shard=True,
+          test_timeout=None, num_retries=args.num_retries)
+      iteration_results.AddTestRunResults(test_results)

       # Only allow exit code escalation
       if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
         exit_code = test_exit_code

-  if args.device_flags:
-    args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
-                                     args.device_flags)
+    if py_tests:
+      test_results, test_exit_code = test_dispatcher.RunTests(
+          py_tests, py_runner_factory, devices, shard=True, test_timeout=None,
+          num_retries=args.num_retries)
+      iteration_results.AddTestRunResults(test_results)

-  report_results.LogFull(
-      results=results,
-      test_type='Instrumentation',
-      test_package=os.path.basename(args.test_apk),
-      annotation=args.annotations,
-      flakiness_server=args.flakiness_dashboard_server)
+    results.append(iteration_results)
+    report_results.LogFull(
+        results=iteration_results,
+        test_type='Instrumentation',
+        test_package=os.path.basename(args.test_apk),
+        annotation=args.annotations,
+        flakiness_server=args.flakiness_dashboard_server)

   if args.json_results_file:
     json_results.GenerateJsonResultsFile(results, args.json_results_file)

   return exit_code


 def _RunUIAutomatorTests(args, devices):
   """Subcommand of RunTestsCommands which runs uiautomator tests."""
   uiautomator_options = ProcessUIAutomatorOptions(args)

   runner_factory, tests = uiautomator_setup.Setup(uiautomator_options, devices)

   results, exit_code = test_dispatcher.RunTests(
       tests, runner_factory, devices, shard=True, test_timeout=None,
       num_retries=args.num_retries)

   report_results.LogFull(
       results=results,
       test_type='UIAutomator',
       test_package=os.path.basename(args.test_jar),
       annotation=args.annotations,
       flakiness_server=args.flakiness_dashboard_server)

   if args.json_results_file:
-    json_results.GenerateJsonResultsFile(results, args.json_results_file)
+    json_results.GenerateJsonResultsFile([results], args.json_results_file)

   return exit_code


 def _RunJUnitTests(args):
   """Subcommand of RunTestsCommand which runs junit tests."""
   runner_factory, tests = junit_setup.Setup(args)
   results, exit_code = junit_dispatcher.RunTests(tests, runner_factory)

   report_results.LogFull(
       results=results,
       test_type='JUnit',
       test_package=args.test_suite)

   if args.json_results_file:
-    json_results.GenerateJsonResultsFile(results, args.json_results_file)
+    json_results.GenerateJsonResultsFile([results], args.json_results_file)

   return exit_code


 def _RunMonkeyTests(args, devices):
   """Subcommand of RunTestsCommands which runs monkey tests."""
   monkey_options = ProcessMonkeyTestOptions(args)

   runner_factory, tests = monkey_setup.Setup(monkey_options)

   results, exit_code = test_dispatcher.RunTests(
       tests, runner_factory, devices, shard=False, test_timeout=None,
       num_retries=args.num_retries)

   report_results.LogFull(
       results=results,
       test_type='Monkey',
       test_package='Monkey')

   if args.json_results_file:
-    json_results.GenerateJsonResultsFile(results, args.json_results_file)
+    json_results.GenerateJsonResultsFile([results], args.json_results_file)

   return exit_code


 def _RunPerfTests(args, active_devices):
   """Subcommand of RunTestsCommands which runs perf tests."""
   perf_options = ProcessPerfTestOptions(args)

   # Just save a simple json with a list of test names.
   if perf_options.output_json_list:
(...skipping 15 matching lines...)
   results, _ = test_dispatcher.RunTests(
       tests, runner_factory, devices, shard=False, test_timeout=None,
       num_retries=args.num_retries)

   report_results.LogFull(
       results=results,
       test_type='Perf',
       test_package='Perf')

   if args.json_results_file:
-    json_results.GenerateJsonResultsFile(results, args.json_results_file)
+    json_results.GenerateJsonResultsFile([results], args.json_results_file)

   if perf_options.single_step:
     return perf_test_runner.PrintTestOutput('single_step')

   perf_test_runner.PrintSummary(tests)

   # Always return 0 on the sharding stage. Individual tests exit_code
   # will be returned on the print_step stage.
   return 0

(...skipping 106 matching lines...)
   def infra_error(message):
     parser.exit(status=constants.INFRA_EXIT_CODE, message=message)

   if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
     infra_error('%s is not yet supported in platform mode' % args.command)

   with environment_factory.CreateEnvironment(args, infra_error) as env:
     with test_instance_factory.CreateTestInstance(args, infra_error) as test:
       with test_run_factory.CreateTestRun(
           args, env, test, infra_error) as test_run:
-        results = test_run.RunTests()
+        results = []
+        repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
+                       else itertools.count())
mikecase (-- gone --) 2015/09/28 13:37:58 From what I see online, itertools.count() will jus
jbudorick 2015/09/28 13:40:16 For infinite repetition, of course: https://code.g
mikecase (-- gone --) 2015/09/28 14:00:12 Ah, this makes sense then.
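
To summarize the resolved thread: a non-negative --repeat yields a bounded xrange, while a negative value switches to itertools.count(), whose unbounded stream keeps the for loop running until the process is interrupted. A minimal sketch follows (Python 2, to match the patch's xrange; _Repetitions is an illustrative name, not a helper in this CL):

    import itertools

    def _Repetitions(repeat):
      # repeat >= 0: one initial run plus `repeat` repeats, so the
      # default of 0 runs the tests exactly once.
      # repeat < 0: itertools.count() yields 0, 1, 2, ... without end,
      # which is the 'infinite repetition' this patch set is named for.
      return xrange(repeat + 1) if repeat >= 0 else itertools.count()

    for i in _Repetitions(2):
      print 'iteration %d of 3' % (i + 1)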
+        for _ in repetitions:
+          iteration_results = test_run.RunTests()
+          results.append(iteration_results)

-        if args.environment == 'remote_device' and args.trigger:
-          return 0 # Not returning results, only triggering.
-
-        report_results.LogFull(
-            results=results,
-            test_type=test.TestType(),
-            test_package=test_run.TestPackage(),
-            annotation=getattr(args, 'annotations', None),
-            flakiness_server=getattr(args, 'flakiness_dashboard_server', None))
+          if iteration_results is not None:
+            report_results.LogFull(
+                results=iteration_results,
+                test_type=test.TestType(),
+                test_package=test_run.TestPackage(),
+                annotation=getattr(args, 'annotations', None),
+                flakiness_server=getattr(args, 'flakiness_dashboard_server',
+                                         None))

         if args.json_results_file:
           json_results.GenerateJsonResultsFile(
               results, args.json_results_file)

-        return 0 if results.DidRunPass() else constants.ERROR_EXIT_CODE
+        return (0 if all(r.DidRunPass() for r in results)
+                else constants.ERROR_EXIT_CODE)


 CommandConfigTuple = collections.namedtuple(
     'CommandConfigTuple',
     ['add_options_func', 'help_txt'])
 VALID_COMMANDS = {
     'gtest': CommandConfigTuple(
         AddGTestOptions,
         'googletest-based C++ tests'),
     'instrumentation': CommandConfigTuple(
(...skipping 50 matching lines...)
     if e.is_infra_error:
       return constants.INFRA_EXIT_CODE
     return constants.ERROR_EXIT_CODE
   except: # pylint: disable=W0702
     logging.exception('Unrecognized error occurred.')
     return constants.ERROR_EXIT_CODE


 if __name__ == '__main__':
   sys.exit(main())
