OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # | 2 # |
3 # Copyright 2013 The Chromium Authors. All rights reserved. | 3 # Copyright 2013 The Chromium Authors. All rights reserved. |
4 # Use of this source code is governed by a BSD-style license that can be | 4 # Use of this source code is governed by a BSD-style license that can be |
5 # found in the LICENSE file. | 5 # found in the LICENSE file. |
6 | 6 |
7 """Runs all types of tests from one unified interface.""" | 7 """Runs all types of tests from one unified interface.""" |
8 | 8 |
9 import argparse | 9 import argparse |
10 import collections | 10 import collections |
(...skipping 10 matching lines...) |
21 from devil import devil_env | 21 from devil import devil_env |
22 from devil.android import device_blacklist | 22 from devil.android import device_blacklist |
23 from devil.android import device_errors | 23 from devil.android import device_errors |
24 from devil.android import device_utils | 24 from devil.android import device_utils |
25 from devil.android import forwarder | 25 from devil.android import forwarder |
26 from devil.android import ports | 26 from devil.android import ports |
27 from devil.utils import reraiser_thread | 27 from devil.utils import reraiser_thread |
28 from devil.utils import run_tests_helper | 28 from devil.utils import run_tests_helper |
29 | 29 |
30 from pylib import constants | 30 from pylib import constants |
31 from pylib.constants import host_paths | |
32 from pylib.base import base_test_result | 31 from pylib.base import base_test_result |
33 from pylib.base import environment_factory | 32 from pylib.base import environment_factory |
34 from pylib.base import test_dispatcher | 33 from pylib.base import test_dispatcher |
35 from pylib.base import test_instance_factory | 34 from pylib.base import test_instance_factory |
36 from pylib.base import test_run_factory | 35 from pylib.base import test_run_factory |
| 36 from pylib.constants import host_paths |
37 from pylib.linker import setup as linker_setup | 37 from pylib.linker import setup as linker_setup |
38 from pylib.junit import setup as junit_setup | 38 from pylib.junit import setup as junit_setup |
39 from pylib.junit import test_dispatcher as junit_dispatcher | 39 from pylib.junit import test_dispatcher as junit_dispatcher |
40 from pylib.monkey import setup as monkey_setup | 40 from pylib.monkey import setup as monkey_setup |
41 from pylib.monkey import test_options as monkey_test_options | 41 from pylib.monkey import test_options as monkey_test_options |
42 from pylib.perf import setup as perf_setup | 42 from pylib.perf import setup as perf_setup |
43 from pylib.perf import test_options as perf_test_options | 43 from pylib.perf import test_options as perf_test_options |
44 from pylib.perf import test_runner as perf_test_runner | 44 from pylib.perf import test_runner as perf_test_runner |
45 from pylib.results import json_results | 45 from pylib.results import json_results |
46 from pylib.results import report_results | 46 from pylib.results import report_results |
(...skipping 798 matching lines...) |
845 logging.fatal(message) | 845 logging.fatal(message) |
846 sys.exit(constants.INFRA_EXIT_CODE) | 846 sys.exit(constants.INFRA_EXIT_CODE) |
847 | 847 |
848 if args.command not in _SUPPORTED_IN_PLATFORM_MODE: | 848 if args.command not in _SUPPORTED_IN_PLATFORM_MODE: |
849 infra_error('%s is not yet supported in platform mode' % args.command) | 849 infra_error('%s is not yet supported in platform mode' % args.command) |
850 | 850 |
851 with environment_factory.CreateEnvironment(args, infra_error) as env: | 851 with environment_factory.CreateEnvironment(args, infra_error) as env: |
852 with test_instance_factory.CreateTestInstance(args, infra_error) as test: | 852 with test_instance_factory.CreateTestInstance(args, infra_error) as test: |
853 with test_run_factory.CreateTestRun( | 853 with test_run_factory.CreateTestRun( |
854 args, env, test, infra_error) as test_run: | 854 args, env, test, infra_error) as test_run: |
855 results = [] | 855 |
| 856 # TODO(jbudorick): Rewrite results handling. |
| 857 |
| 858 # all_raw_results is a list of lists of base_test_result.TestRunResults |
| 859 # objects. Each instance of TestRunResults contains all test results |
| 860 # produced by a single try, while each list of TestRunResults contains |
| 861 # all tries in a single iteration. |
| 862 all_raw_results = [] |
| 863 # all_iteration_results is a list of base_test_result.TestRunResults |
| 864 # objects. Each instance of TestRunResults contains the last test result |
| 865 # for each test run in that iteration. |
| 866 all_iteration_results = [] |
| 867 |
856 repetitions = (xrange(args.repeat + 1) if args.repeat >= 0 | 868 repetitions = (xrange(args.repeat + 1) if args.repeat >= 0 |
857 else itertools.count()) | 869 else itertools.count()) |
858 result_counts = collections.defaultdict( | 870 result_counts = collections.defaultdict( |
859 lambda: collections.defaultdict(int)) | 871 lambda: collections.defaultdict(int)) |
860 iteration_count = 0 | 872 iteration_count = 0 |
861 for _ in repetitions: | 873 for _ in repetitions: |
862 iteration_results = test_run.RunTests() | 874 raw_results = test_run.RunTests() |
863 if iteration_results is not None: | 875 if not raw_results: |
864 iteration_count += 1 | 876 continue |
865 results.append(iteration_results) | 877 |
866 for r in iteration_results.GetAll(): | 878 all_raw_results.append(raw_results) |
867 result_counts[r.GetName()][r.GetType()] += 1 | 879 |
868 report_results.LogFull( | 880 iteration_results = base_test_result.TestRunResults() |
869 results=iteration_results, | 881 for r in reversed(raw_results): |
870 test_type=test.TestType(), | 882 iteration_results.AddTestRunResults(r) |
871 test_package=test_run.TestPackage(), | 883 all_iteration_results.append(iteration_results) |
872 annotation=getattr(args, 'annotations', None), | 884 |
873 flakiness_server=getattr(args, 'flakiness_dashboard_server', | 885 iteration_count += 1 |
874 None)) | 886 for r in iteration_results.GetAll(): |
875 if args.break_on_failure and not iteration_results.DidRunPass(): | 887 result_counts[r.GetName()][r.GetType()] += 1 |
876 break | 888 report_results.LogFull( |
| 889 results=iteration_results, |
| 890 test_type=test.TestType(), |
| 891 test_package=test_run.TestPackage(), |
| 892 annotation=getattr(args, 'annotations', None), |
| 893 flakiness_server=getattr(args, 'flakiness_dashboard_server', |
| 894 None)) |
| 895 if args.break_on_failure and not iteration_results.DidRunPass(): |
| 896 break |
877 | 897 |
878 if iteration_count > 1: | 898 if iteration_count > 1: |
879 # display summary results | 899 # display summary results |
880 # only display results for a test if at least one test did not pass | 900 # only display results for a test if at least one test did not pass |
881 all_pass = 0 | 901 all_pass = 0 |
882 tot_tests = 0 | 902 tot_tests = 0 |
883 for test_name in result_counts: | 903 for test_name in result_counts: |
884 tot_tests += 1 | 904 tot_tests += 1 |
885 if any(result_counts[test_name][x] for x in ( | 905 if any(result_counts[test_name][x] for x in ( |
886 base_test_result.ResultType.FAIL, | 906 base_test_result.ResultType.FAIL, |
887 base_test_result.ResultType.CRASH, | 907 base_test_result.ResultType.CRASH, |
888 base_test_result.ResultType.TIMEOUT, | 908 base_test_result.ResultType.TIMEOUT, |
889 base_test_result.ResultType.UNKNOWN)): | 909 base_test_result.ResultType.UNKNOWN)): |
890 logging.critical( | 910 logging.critical( |
891 '%s: %s', | 911 '%s: %s', |
892 test_name, | 912 test_name, |
893 ', '.join('%s %s' % (str(result_counts[test_name][i]), i) | 913 ', '.join('%s %s' % (str(result_counts[test_name][i]), i) |
894 for i in base_test_result.ResultType.GetTypes())) | 914 for i in base_test_result.ResultType.GetTypes())) |
895 else: | 915 else: |
896 all_pass += 1 | 916 all_pass += 1 |
897 | 917 |
898 logging.critical('%s of %s tests passed in all %s runs', | 918 logging.critical('%s of %s tests passed in all %s runs', |
899 str(all_pass), | 919 str(all_pass), |
900 str(tot_tests), | 920 str(tot_tests), |
901 str(iteration_count)) | 921 str(iteration_count)) |
902 | 922 |
903 if args.json_results_file: | 923 if args.json_results_file: |
904 json_results.GenerateJsonResultsFile( | 924 json_results.GenerateJsonResultsFile( |
905 results, args.json_results_file) | 925 all_raw_results, args.json_results_file) |
906 | 926 |
907 return (0 if all(r.DidRunPass() for r in results) | 927 return (0 if all(r.DidRunPass() for r in all_iteration_results) |
908 else constants.ERROR_EXIT_CODE) | 928 else constants.ERROR_EXIT_CODE) |
909 | 929 |
910 | 930 |
911 CommandConfigTuple = collections.namedtuple( | 931 CommandConfigTuple = collections.namedtuple( |
912 'CommandConfigTuple', | 932 'CommandConfigTuple', |
913 ['add_options_func', 'help_txt']) | 933 ['add_options_func', 'help_txt']) |
914 VALID_COMMANDS = { | 934 VALID_COMMANDS = { |
915 'gtest': CommandConfigTuple( | 935 'gtest': CommandConfigTuple( |
916 AddGTestOptions, | 936 AddGTestOptions, |
917 'googletest-based C++ tests'), | 937 'googletest-based C++ tests'), |
(...skipping 48 matching lines...) |
966 if e.is_infra_error: | 986 if e.is_infra_error: |
967 return constants.INFRA_EXIT_CODE | 987 return constants.INFRA_EXIT_CODE |
968 return constants.ERROR_EXIT_CODE | 988 return constants.ERROR_EXIT_CODE |
969 except: # pylint: disable=W0702 | 989 except: # pylint: disable=W0702 |
970 logging.exception('Unrecognized error occurred.') | 990 logging.exception('Unrecognized error occurred.') |
971 return constants.ERROR_EXIT_CODE | 991 return constants.ERROR_EXIT_CODE |
972 | 992 |
973 | 993 |
974 if __name__ == '__main__': | 994 if __name__ == '__main__': |
975 sys.exit(main()) | 995 sys.exit(main()) |
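
A note on the results bookkeeping this change introduces at new lines 858-866: each iteration can retry failing tests, so test_run.RunTests() returns one TestRunResults per try, and the loop at new line 881 folds the tries in reverse so that the last try's outcome for each test wins. The sketch below illustrates that merge semantics with simplified stand-ins; the BaseTestResult/TestRunResults classes here only approximate the pylib.base.base_test_result API (in particular, first-result-added-wins de-duplication by test name is an assumption standing in for pylib's set-based behavior), and the test names and outcomes are invented for illustration.

# Simplified stand-ins for pylib.base.base_test_result; these only
# approximate the real API and exist to demonstrate the merge order.
class BaseTestResult(object):
  def __init__(self, name, result_type):
    self._name = name
    self._type = result_type

  def GetName(self):
    return self._name

  def GetType(self):
    return self._type


class TestRunResults(object):
  def __init__(self):
    # name -> BaseTestResult. Assumed to mirror pylib's set-based
    # behavior: the first result added for a given test name wins,
    # and later duplicates are ignored.
    self._results = {}

  def AddResult(self, result):
    self._results.setdefault(result.GetName(), result)

  def AddTestRunResults(self, other):
    for r in other.GetAll():
      self.AddResult(r)

  def GetAll(self):
    return list(self._results.values())


# One iteration, two tries: TestA fails on the first try and passes
# on the retry (hypothetical names and outcomes).
try1 = TestRunResults()
try1.AddResult(BaseTestResult('TestA', 'FAIL'))
try1.AddResult(BaseTestResult('TestB', 'PASS'))
try2 = TestRunResults()
try2.AddResult(BaseTestResult('TestA', 'PASS'))
raw_results = [try1, try2]

# reversed() adds the last try first, so with first-add-wins semantics
# the last try's result for each test is the one that sticks.
iteration_results = TestRunResults()
for r in reversed(raw_results):
  iteration_results.AddTestRunResults(r)

for r in sorted(iteration_results.GetAll(), key=lambda x: x.GetName()):
  print('%s: %s' % (r.GetName(), r.GetType()))
# Prints:
# TestA: PASS
# TestB: PASS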
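
The repetition and tallying idioms at new lines 868-871 are also easy to miss: a non-negative --repeat yields a finite xrange of N+1 iterations, while a negative value switches to itertools.count(), in which case the loop only stops via --break-on-failure or an outside kill; result_counts is a two-level defaultdict keyed by test name and then result type. A minimal sketch, assuming Python 2 as the script does (xrange, iteritems), with invented values:

import collections
import itertools

args_repeat = 2  # Stand-in for args.repeat; -1 would mean "run forever".

# Finite for --repeat >= 0 (N+1 iterations), unbounded otherwise.
repetitions = (xrange(args_repeat + 1) if args_repeat >= 0
               else itertools.count())

# test name -> result type -> count.
result_counts = collections.defaultdict(
    lambda: collections.defaultdict(int))

for _ in repetitions:
  # A real iteration would tally actual results here; this fakes one.
  result_counts['TestA']['PASS'] += 1

for name, counts in result_counts.iteritems():
  print('%s: %s' % (name, dict(counts)))
# Prints: TestA: {'PASS': 3}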