| OLD | NEW |
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # | 2 # |
| 3 # Copyright 2013 The Chromium Authors. All rights reserved. | 3 # Copyright 2013 The Chromium Authors. All rights reserved. |
| 4 # Use of this source code is governed by a BSD-style license that can be | 4 # Use of this source code is governed by a BSD-style license that can be |
| 5 # found in the LICENSE file. | 5 # found in the LICENSE file. |
| 6 | 6 |
| 7 """Runs all types of tests from one unified interface.""" | 7 """Runs all types of tests from one unified interface.""" |
| 8 | 8 |
| 9 import argparse | 9 import argparse |
| 10 import collections | 10 import collections |
| (...skipping 18 matching lines...) |
| 29 if host_paths.DEVIL_PATH not in sys.path: | 29 if host_paths.DEVIL_PATH not in sys.path: |
| 30 sys.path.append(host_paths.DEVIL_PATH) | 30 sys.path.append(host_paths.DEVIL_PATH) |
| 31 | 31 |
| 32 from devil import base_error | 32 from devil import base_error |
| 33 from devil.utils import reraiser_thread | 33 from devil.utils import reraiser_thread |
| 34 from devil.utils import run_tests_helper | 34 from devil.utils import run_tests_helper |
| 35 | 35 |
| 36 from pylib import constants | 36 from pylib import constants |
| 37 from pylib.base import base_test_result | 37 from pylib.base import base_test_result |
| 38 from pylib.base import environment_factory | 38 from pylib.base import environment_factory |
| | 39 from pylib.base import output_manager |
| | 40 from pylib.base import output_manager_factory |
| 39 from pylib.base import test_instance_factory | 41 from pylib.base import test_instance_factory |
| 40 from pylib.base import test_run_factory | 42 from pylib.base import test_run_factory |
| 41 from pylib.results import json_results | 43 from pylib.results import json_results |
| 42 from pylib.results import report_results | 44 from pylib.results import report_results |
| | 45 from pylib.results.presentation import test_results_presentation |
| 43 from pylib.utils import logdog_helper | 46 from pylib.utils import logdog_helper |
| 44 from pylib.utils import logging_utils | 47 from pylib.utils import logging_utils |
| 45 | 48 |
| 46 from py_utils import contextlib_ext | 49 from py_utils import contextlib_ext |
| 47 | 50 |
| 48 | 51 |
| 49 _DEVIL_STATIC_CONFIG_FILE = os.path.abspath(os.path.join( | 52 _DEVIL_STATIC_CONFIG_FILE = os.path.abspath(os.path.join( |
| 50 host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'devil_config.json')) | 53 host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'devil_config.json')) |
| 51 | 54 |
| 52 | 55 |
| (...skipping 93 matching lines...) |
| 146 help='Run the test scripts in platform mode, which ' | 149 help='Run the test scripts in platform mode, which ' |
| 147 'conceptually separates the test runner from the ' | 150 'conceptually separates the test runner from the ' |
| 148 '"device" (local or remote, real or emulated) on ' | 151 '"device" (local or remote, real or emulated) on ' |
| 149 'which the tests are running. [experimental]') | 152 'which the tests are running. [experimental]') |
| 150 | 153 |
| 151 parser.add_argument( | 154 parser.add_argument( |
| 152 '-e', '--environment', | 155 '-e', '--environment', |
| 153 default='local', choices=constants.VALID_ENVIRONMENTS, | 156 default='local', choices=constants.VALID_ENVIRONMENTS, |
| 154 help='Test environment to run in (default: %(default)s).') | 157 help='Test environment to run in (default: %(default)s).') |
| 155 | 158 |
| | 159 parser.add_argument( |
| | 160 '--local-output', |
| | 161 action='store_true', |
| | 162 help='Whether to archive test output locally and generate ' |
| | 163 'a local results detail page.') |
| | 164 |
| 156 class FastLocalDevAction(argparse.Action): | 165 class FastLocalDevAction(argparse.Action): |
| 157 def __call__(self, parser, namespace, values, option_string=None): | 166 def __call__(self, parser, namespace, values, option_string=None): |
| 158 namespace.verbose_count = max(namespace.verbose_count, 1) | 167 namespace.verbose_count = max(namespace.verbose_count, 1) |
| 159 namespace.num_retries = 0 | 168 namespace.num_retries = 0 |
| 160 namespace.enable_device_cache = True | 169 namespace.enable_device_cache = True |
| 161 namespace.enable_concurrent_adb = True | 170 namespace.enable_concurrent_adb = True |
| 162 namespace.skip_clear_data = True | 171 namespace.skip_clear_data = True |
| 163 namespace.extract_test_list_from_filter = True | 172 namespace.extract_test_list_from_filter = True |
| 164 | 173 |
| 165 parser.add_argument( | 174 parser.add_argument( |
| (...skipping 656 matching lines...) |
| 822 logging.critical( | 831 logging.critical( |
| 823 'Logcat: %s', logdog_helper.get_viewer_url('unified_logcats')) | 832 'Logcat: %s', logdog_helper.get_viewer_url('unified_logcats')) |
| 824 | 833 |
| 825 | 834 |
| 826 logcats_uploader = contextlib_ext.Optional( | 835 logcats_uploader = contextlib_ext.Optional( |
| 827 upload_logcats_file(), | 836 upload_logcats_file(), |
| 828 'upload_logcats_file' in args and args.upload_logcats_file) | 837 'upload_logcats_file' in args and args.upload_logcats_file) |
| 829 | 838 |
| 830 ### Set up test objects. | 839 ### Set up test objects. |
| 831 | 840 |
| 832 env = environment_factory.CreateEnvironment(args, infra_error) | 841 out_manager = output_manager_factory.CreateOutputManager(args) |
| | 842 env = environment_factory.CreateEnvironment( |
| | 843 args, out_manager, infra_error) |
| 833 test_instance = test_instance_factory.CreateTestInstance(args, infra_error) | 844 test_instance = test_instance_factory.CreateTestInstance(args, infra_error) |
| 834 test_run = test_run_factory.CreateTestRun( | 845 test_run = test_run_factory.CreateTestRun( |
| 835 args, env, test_instance, infra_error) | 846 args, env, test_instance, infra_error) |
| 836 | 847 |
| 837 ### Run. | 848 ### Run. |
| | 849 with out_manager: |
| | 850 with json_writer, logcats_uploader, env, test_instance, test_run: |
| 838 | 851 |
| 839 with json_writer, logcats_uploader, env, test_instance, test_run: | 852 repetitions = (xrange(args.repeat + 1) if args.repeat >= 0 |
| | 853 else itertools.count()) |
| | 854 result_counts = collections.defaultdict( |
| | 855 lambda: collections.defaultdict(int)) |
| | 856 iteration_count = 0 |
| | 857 for _ in repetitions: |
| | 858 raw_results = test_run.RunTests() |
| | 859 if not raw_results: |
| | 860 continue |
| 840 | 861 |
| 841 repetitions = (xrange(args.repeat + 1) if args.repeat >= 0 | 862 all_raw_results.append(raw_results) |
| 842 else itertools.count()) | |
| 843 result_counts = collections.defaultdict( | |
| 844 lambda: collections.defaultdict(int)) | |
| 845 iteration_count = 0 | |
| 846 for _ in repetitions: | |
| 847 raw_results = test_run.RunTests() | |
| 848 if not raw_results: | |
| 849 continue | |
| 850 | 863 |
| 851 all_raw_results.append(raw_results) | 864 iteration_results = base_test_result.TestRunResults() |
| | 865 for r in reversed(raw_results): |
| | 866 iteration_results.AddTestRunResults(r) |
| | 867 all_iteration_results.append(iteration_results) |
| 852 | 868 |
| 853 iteration_results = base_test_result.TestRunResults() | 869 iteration_count += 1 |
| 854 for r in reversed(raw_results): | 870 for r in iteration_results.GetAll(): |
| 855 iteration_results.AddTestRunResults(r) | 871 result_counts[r.GetName()][r.GetType()] += 1 |
| 856 all_iteration_results.append(iteration_results) | 872 report_results.LogFull( |
| | 873 results=iteration_results, |
| | 874 test_type=test_instance.TestType(), |
| | 875 test_package=test_run.TestPackage(), |
| | 876 annotation=getattr(args, 'annotations', None), |
| | 877 flakiness_server=getattr(args, 'flakiness_dashboard_server', |
| | 878 None)) |
| | 879 if args.break_on_failure and not iteration_results.DidRunPass(): |
| | 880 break |
| 857 | 881 |
| 858 iteration_count += 1 | 882 if iteration_count > 1: |
| 859 for r in iteration_results.GetAll(): | 883 # display summary results |
| 860 result_counts[r.GetName()][r.GetType()] += 1 | 884 # only display results for a test if at least one test did not pass |
| 861 report_results.LogFull( | 885 all_pass = 0 |
| 862 results=iteration_results, | 886 tot_tests = 0 |
| 863 test_type=test_instance.TestType(), | 887 for test_name in result_counts: |
| 864 test_package=test_run.TestPackage(), | 888 tot_tests += 1 |
| 865 annotation=getattr(args, 'annotations', None), | 889 if any(result_counts[test_name][x] for x in ( |
| 866 flakiness_server=getattr(args, 'flakiness_dashboard_server', | 890 base_test_result.ResultType.FAIL, |
| 867 None)) | 891 base_test_result.ResultType.CRASH, |
| 868 if args.break_on_failure and not iteration_results.DidRunPass(): | 892 base_test_result.ResultType.TIMEOUT, |
| 869 break | 893 base_test_result.ResultType.UNKNOWN)): |
| | 894 logging.critical( |
| | 895 '%s: %s', |
| | 896 test_name, |
| | 897 ', '.join('%s %s' % (str(result_counts[test_name][i]), i) |
| | 898 for i in base_test_result.ResultType.GetTypes())) |
| | 899 else: |
| | 900 all_pass += 1 |
| 870 | 901 |
| 871 if iteration_count > 1: | 902 logging.critical('%s of %s tests passed in all %s runs', |
| 872 # display summary results | 903 str(all_pass), |
| 873 # only display results for a test if at least one test did not pass | 904 str(tot_tests), |
| 874 all_pass = 0 | 905 str(iteration_count)) |
| 875 tot_tests = 0 | |
| 876 for test_name in result_counts: | |
| 877 tot_tests += 1 | |
| 878 if any(result_counts[test_name][x] for x in ( | |
| 879 base_test_result.ResultType.FAIL, | |
| 880 base_test_result.ResultType.CRASH, | |
| 881 base_test_result.ResultType.TIMEOUT, | |
| 882 base_test_result.ResultType.UNKNOWN)): | |
| 883 logging.critical( | |
| 884 '%s: %s', | |
| 885 test_name, | |
| 886 ', '.join('%s %s' % (str(result_counts[test_name][i]), i) | |
| 887 for i in base_test_result.ResultType.GetTypes())) | |
| 888 else: | |
| 889 all_pass += 1 | |
| 890 | 906 |
| 891 logging.critical('%s of %s tests passed in all %s runs', | 907 if args.local_output and args.json_results_file: |
| 892 str(all_pass), | 908 with out_manager.ArchivedTempfile( |
| 893 str(tot_tests), | 909 'test_results_presentation.html', |
| 894 str(iteration_count)) | 910 'test_results_presentation', |
| | 911 output_manager.Datatype.HTML) as results_detail_file: |
| | 912 result_html_string, _, _ = test_results_presentation.result_details( |
| | 913 json_path=args.json_results_file, |
| | 914 test_name=args.command, |
| | 915 cs_base_url='http://cs.chromium.org', |
| | 916 local_output=True) |
| | 917 results_detail_file.write(result_html_string) |
| | 918 results_detail_file.flush() |
| | 919 logging.critical('TEST RESULTS: %s', results_detail_file.Link()) |
| 895 | 920 |
| 896 if args.command == 'perf' and (args.steps or args.single_step): | 921 if args.command == 'perf' and (args.steps or args.single_step): |
| 897 return 0 | 922 return 0 |
| 898 | 923 |
| 899 return (0 if all(r.DidRunPass() for r in all_iteration_results) | 924 return (0 if all(r.DidRunPass() for r in all_iteration_results) |
| 900 else constants.ERROR_EXIT_CODE) | 925 else constants.ERROR_EXIT_CODE) |
| 901 | 926 |
| 902 | 927 |
| 903 def DumpThreadStacks(_signal, _frame): | 928 def DumpThreadStacks(_signal, _frame): |
| 904 for thread in threading.enumerate(): | 929 for thread in threading.enumerate(): |
| (...skipping 81 matching lines...) |
| 986 if e.is_infra_error: | 1011 if e.is_infra_error: |
| 987 return constants.INFRA_EXIT_CODE | 1012 return constants.INFRA_EXIT_CODE |
| 988 return constants.ERROR_EXIT_CODE | 1013 return constants.ERROR_EXIT_CODE |
| 989 except: # pylint: disable=W0702 | 1014 except: # pylint: disable=W0702 |
| 990 logging.exception('Unrecognized error occurred.') | 1015 logging.exception('Unrecognized error occurred.') |
| 991 return constants.ERROR_EXIT_CODE | 1016 return constants.ERROR_EXIT_CODE |
| 992 | 1017 |
| 993 | 1018 |
| 994 if __name__ == '__main__': | 1019 if __name__ == '__main__': |
| 995 sys.exit(main()) | 1020 sys.exit(main()) |