OLD | NEW |
---|---|
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # | 2 # |
3 # Copyright 2013 The Chromium Authors. All rights reserved. | 3 # Copyright 2013 The Chromium Authors. All rights reserved. |
4 # Use of this source code is governed by a BSD-style license that can be | 4 # Use of this source code is governed by a BSD-style license that can be |
5 # found in the LICENSE file. | 5 # found in the LICENSE file. |
6 | 6 |
7 """Runs all types of tests from one unified interface.""" | 7 """Runs all types of tests from one unified interface.""" |
8 | 8 |
9 import argparse | 9 import argparse |
10 import collections | 10 import collections |
(...skipping 18 matching lines...) | |
29 if host_paths.DEVIL_PATH not in sys.path: | 29 if host_paths.DEVIL_PATH not in sys.path: |
30 sys.path.append(host_paths.DEVIL_PATH) | 30 sys.path.append(host_paths.DEVIL_PATH) |
31 | 31 |
32 from devil import base_error | 32 from devil import base_error |
33 from devil.utils import reraiser_thread | 33 from devil.utils import reraiser_thread |
34 from devil.utils import run_tests_helper | 34 from devil.utils import run_tests_helper |
35 | 35 |
36 from pylib import constants | 36 from pylib import constants |
37 from pylib.base import base_test_result | 37 from pylib.base import base_test_result |
38 from pylib.base import environment_factory | 38 from pylib.base import environment_factory |
39 from pylib.base import output_manager | |
40 from pylib.base import output_manager_factory | |
39 from pylib.base import test_instance_factory | 41 from pylib.base import test_instance_factory |
40 from pylib.base import test_run_factory | 42 from pylib.base import test_run_factory |
41 from pylib.results import json_results | 43 from pylib.results import json_results |
42 from pylib.results import report_results | 44 from pylib.results import report_results |
45 from pylib.results.presentation import test_results_presentation | |
43 from pylib.utils import logdog_helper | 46 from pylib.utils import logdog_helper |
44 from pylib.utils import logging_utils | 47 from pylib.utils import logging_utils |
45 | 48 |
46 from py_utils import contextlib_ext | 49 from py_utils import contextlib_ext |
47 | 50 |
48 | 51 |
49 _DEVIL_STATIC_CONFIG_FILE = os.path.abspath(os.path.join( | 52 _DEVIL_STATIC_CONFIG_FILE = os.path.abspath(os.path.join( |
50 host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'devil_config.json')) | 53 host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'devil_config.json')) |
51 | 54 |
52 | 55 |
(...skipping 93 matching lines...) | |
146 help='Run the test scripts in platform mode, which ' | 149 help='Run the test scripts in platform mode, which ' |
147 'conceptually separates the test runner from the ' | 150 'conceptually separates the test runner from the ' |
148 '"device" (local or remote, real or emulated) on ' | 151 '"device" (local or remote, real or emulated) on ' |
149 'which the tests are running. [experimental]') | 152 'which the tests are running. [experimental]') |
150 | 153 |
151 parser.add_argument( | 154 parser.add_argument( |
152 '-e', '--environment', | 155 '-e', '--environment', |
153 default='local', choices=constants.VALID_ENVIRONMENTS, | 156 default='local', choices=constants.VALID_ENVIRONMENTS, |
154 help='Test environment to run in (default: %(default)s).') | 157 help='Test environment to run in (default: %(default)s).') |
155 | 158 |
159 parser.add_argument( | |
160 '--local-output', | |
161 action='store_true', | |
162 help='Whether to archive test output locally and generate ' | |
163 'a local results detail page.') | |
164 | |
156 class FastLocalDevAction(argparse.Action): | 165 class FastLocalDevAction(argparse.Action): |
157 def __call__(self, parser, namespace, values, option_string=None): | 166 def __call__(self, parser, namespace, values, option_string=None): |
158 namespace.verbose_count = max(namespace.verbose_count, 1) | 167 namespace.verbose_count = max(namespace.verbose_count, 1) |
159 namespace.num_retries = 0 | 168 namespace.num_retries = 0 |
160 namespace.enable_device_cache = True | 169 namespace.enable_device_cache = True |
161 namespace.enable_concurrent_adb = True | 170 namespace.enable_concurrent_adb = True |
162 namespace.skip_clear_data = True | 171 namespace.skip_clear_data = True |
163 namespace.extract_test_list_from_filter = True | 172 namespace.extract_test_list_from_filter = True |
164 | 173 |
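
FastLocalDevAction above follows argparse's preset pattern: one flag that rewrites several other options on the parsed namespace. A minimal, self-contained sketch of that pattern (the flag and option names below are illustrative, not the real test_runner ones):

import argparse

class PresetAction(argparse.Action):
  # When the flag is seen, overwrite other options on the namespace.
  # argparse applies action defaults before parsing, so these
  # attributes already exist by the time __call__ runs.
  def __call__(self, parser, namespace, values, option_string=None):
    namespace.verbose_count = max(namespace.verbose_count, 1)
    namespace.num_retries = 0

parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', dest='verbose_count',
                    action='count', default=0)
parser.add_argument('--num-retries', type=int, default=2)
parser.add_argument('--fast-preset', nargs=0, action=PresetAction)

args = parser.parse_args(['--fast-preset'])
assert args.verbose_count == 1 and args.num_retries == 0
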
165 parser.add_argument( | 174 parser.add_argument( |
(...skipping 656 matching lines...) | |
822 logging.critical( | 831 logging.critical( |
823 'Logcat: %s', logdog_helper.get_viewer_url('unified_logcats')) | 832 'Logcat: %s', logdog_helper.get_viewer_url('unified_logcats')) |
824 | 833 |
825 | 834 |
826 logcats_uploader = contextlib_ext.Optional( | 835 logcats_uploader = contextlib_ext.Optional( |
827 upload_logcats_file(), | 836 upload_logcats_file(), |
828 'upload_logcats_file' in args and args.upload_logcats_file) | 837 'upload_logcats_file' in args and args.upload_logcats_file) |
829 | 838 |
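
contextlib_ext.Optional wraps a context manager so that it is only entered when a condition holds, which is what lets logcats_uploader sit unconditionally in the with statement further down. A sketch of that behavior, assuming this is all the call site relies on (catapult's real implementation may differ):

import contextlib

@contextlib.contextmanager
def optional(cm, condition):
  # Enter the wrapped context manager only when condition is true;
  # otherwise act as a no-op context manager.
  if condition:
    with cm:
      yield
  else:
    yield
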
830 ### Set up test objects. | 839 ### Set up test objects. |
831 | 840 |
832 env = environment_factory.CreateEnvironment(args, infra_error) | 841 out_manager = output_manager_factory.CreateOutputManager(args) |
842 env = environment_factory.CreateEnvironment( | |
843 args, out_manager, infra_error) | |
833 test_instance = test_instance_factory.CreateTestInstance(args, infra_error) | 844 test_instance = test_instance_factory.CreateTestInstance(args, infra_error) |
834 test_run = test_run_factory.CreateTestRun( | 845 test_run = test_run_factory.CreateTestRun( |
835 args, env, test_instance, infra_error) | 846 args, env, test_instance, infra_error) |
836 | 847 |
837 ### Run. | 848 ### Run. |
838 | 849 |
839 with json_writer, logcats_uploader, env, test_instance, test_run: | 850 with out_manager: |
jbudorick 2017/08/23 16:16:20: This has to be in its own with statement and indented…
mikecase (-- gone --) 2017/08/24 05:29:08: yes, pretty sure that was it. One of the things ha…
| |
851 with json_writer, logcats_uploader, env, test_instance, test_run: | |
840 | 852 |
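
The shape jbudorick asks for in the thread above, with bodies elided (names as in the diff): out_manager gets its own outer with statement, so it stays open around everything else and is the last thing torn down.

with out_manager:
  with json_writer, logcats_uploader, env, test_instance, test_run:
    pass  # run the repetitions and log per-iteration results
  # out_manager is still open here, so out_manager.ArchivedTempfile(...)
  # can publish the local results page before archiving shuts down.
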
841 repetitions = (xrange(args.repeat + 1) if args.repeat >= 0 | 853 repetitions = (xrange(args.repeat + 1) if args.repeat >= 0 |
842 else itertools.count()) | 854 else itertools.count()) |
843 result_counts = collections.defaultdict( | 855 result_counts = collections.defaultdict( |
844 lambda: collections.defaultdict(int)) | 856 lambda: collections.defaultdict(int)) |
845 iteration_count = 0 | 857 iteration_count = 0 |
846 for _ in repetitions: | 858 for _ in repetitions: |
847 raw_results = test_run.RunTests() | 859 raw_results = test_run.RunTests() |
848 if not raw_results: | 860 if not raw_results: |
849 continue | 861 continue |
850 | 862 |
851 all_raw_results.append(raw_results) | 863 all_raw_results.append(raw_results) |
852 | 864 |
853 iteration_results = base_test_result.TestRunResults() | 865 iteration_results = base_test_result.TestRunResults() |
854 for r in reversed(raw_results): | 866 for r in reversed(raw_results): |
855 iteration_results.AddTestRunResults(r) | 867 iteration_results.AddTestRunResults(r) |
856 all_iteration_results.append(iteration_results) | 868 all_iteration_results.append(iteration_results) |
857 | 869 |
858 iteration_count += 1 | 870 iteration_count += 1 |
859 for r in iteration_results.GetAll(): | 871 for r in iteration_results.GetAll(): |
860 result_counts[r.GetName()][r.GetType()] += 1 | 872 result_counts[r.GetName()][r.GetType()] += 1 |
861 report_results.LogFull( | 873 report_results.LogFull( |
862 results=iteration_results, | 874 results=iteration_results, |
863 test_type=test_instance.TestType(), | 875 test_type=test_instance.TestType(), |
864 test_package=test_run.TestPackage(), | 876 test_package=test_run.TestPackage(), |
865 annotation=getattr(args, 'annotations', None), | 877 annotation=getattr(args, 'annotations', None), |
866 flakiness_server=getattr(args, 'flakiness_dashboard_server', | 878 flakiness_server=getattr(args, 'flakiness_dashboard_server', |
867 None)) | 879 None)) |
868 if args.break_on_failure and not iteration_results.DidRunPass(): | 880 if args.break_on_failure and not iteration_results.DidRunPass(): |
869 break | 881 break |
870 | 882 |
871 if iteration_count > 1: | 883 if iteration_count > 1: |
872 # display summary results | 884 # display summary results |
873 # only display results for a test if at least one test did not pass | 885 # only display results for a test if at least one test did not pass |
874 all_pass = 0 | 886 all_pass = 0 |
875 tot_tests = 0 | 887 tot_tests = 0 |
876 for test_name in result_counts: | 888 for test_name in result_counts: |
877 tot_tests += 1 | 889 tot_tests += 1 |
878 if any(result_counts[test_name][x] for x in ( | 890 if any(result_counts[test_name][x] for x in ( |
879 base_test_result.ResultType.FAIL, | 891 base_test_result.ResultType.FAIL, |
880 base_test_result.ResultType.CRASH, | 892 base_test_result.ResultType.CRASH, |
881 base_test_result.ResultType.TIMEOUT, | 893 base_test_result.ResultType.TIMEOUT, |
882 base_test_result.ResultType.UNKNOWN)): | 894 base_test_result.ResultType.UNKNOWN)): |
883 logging.critical( | 895 logging.critical( |
884 '%s: %s', | 896 '%s: %s', |
885 test_name, | 897 test_name, |
886 ', '.join('%s %s' % (str(result_counts[test_name][i]), i) | 898 ', '.join('%s %s' % (str(result_counts[test_name][i]), i) |
887 for i in base_test_result.ResultType.GetTypes())) | 899 for i in base_test_result.ResultType.GetTypes())) |
888 else: | 900 else: |
889 all_pass += 1 | 901 all_pass += 1 |
890 | 902 |
891 logging.critical('%s of %s tests passed in all %s runs', | 903 logging.critical('%s of %s tests passed in all %s runs', |
892 str(all_pass), | 904 str(all_pass), |
893 str(tot_tests), | 905 str(tot_tests), |
894 str(iteration_count)) | 906 str(iteration_count)) |
907 | |
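
A worked example of the summary bookkeeping above: result_counts is a two-level defaultdict mapping test name to result type to occurrence count, and a test appears in the summary only if some non-passing type occurred (the result-type strings below are illustrative stand-ins for the base_test_result.ResultType constants):

import collections

result_counts = collections.defaultdict(
    lambda: collections.defaultdict(int))
result_counts['FooTest.testBar']['PASS'] += 1   # passed in run 1
result_counts['FooTest.testBar']['FAIL'] += 1   # flaked in run 2
result_counts['FooTest.testBaz']['PASS'] += 2   # passed in both runs

flaky = [name for name, counts in result_counts.items()
         if any(counts[t] for t in ('FAIL', 'CRASH', 'TIMEOUT', 'UNKNOWN'))]
assert flaky == ['FooTest.testBar']
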
908 if args.local_output and args.json_results_file: | |
909 with out_manager.ArchivedTempfile( | |
910 'test_results_presentation.html', | |
911 'test_results_presentation', | |
912 output_manager.Datatype.HTML) as results_detail_file: | |
913 result_html_string, _, _ = test_results_presentation.result_details( | |
914 json_path=args.json_results_file, | |
jbudorick 2017/08/23 16:16:20: What happens here if args.json_results_file is None?
mikecase (-- gone --) 2017/08/24 05:29:08: the if statement above is not entered... if args.…
jbudorick 2017/08/24 13:18:45: derp, missed that entirely.
| |
915 test_name=args.command, | |
916 cs_base_url='http://cs.chromium.org', | |
917 local_output=True) | |
918 results_detail_file.write(result_html_string) | |
919 results_detail_file.flush() | |
920 logging.critical('TEST RESULTS: %s', results_detail_file.Link()) | |
895 | 921 |
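
The block above relies on an output-manager contract that can be inferred from the call sites alone: ArchivedTempfile is a context manager yielding a file-like object that also exposes Link() once written. A stub illustrating that inferred contract (an assumption from usage, not the real pylib.base.output_manager API):

import contextlib

class _StubArchivedFile(object):
  def __init__(self, name):
    self._name = name
  def write(self, data):
    pass  # a real implementation buffers or writes to a temp file
  def flush(self):
    pass
  def Link(self):
    # A real implementation returns a viewable local path or URL.
    return 'file:///tmp/%s' % self._name

class _StubOutputManager(object):
  @contextlib.contextmanager
  def ArchivedTempfile(self, name, subdir, datatype):
    f = _StubArchivedFile(name)
    yield f
    # Archiving happens here, after the caller has written and flushed.
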
896 if args.command == 'perf' and (args.steps or args.single_step): | 922 if args.command == 'perf' and (args.steps or args.single_step): |
897 return 0 | 923 return 0 |
898 | 924 |
899 return (0 if all(r.DidRunPass() for r in all_iteration_results) | 925 return (0 if all(r.DidRunPass() for r in all_iteration_results) |
900 else constants.ERROR_EXIT_CODE) | 926 else constants.ERROR_EXIT_CODE) |
901 | 927 |
902 | 928 |
903 def DumpThreadStacks(_signal, _frame): | 929 def DumpThreadStacks(_signal, _frame): |
904 for thread in threading.enumerate(): | 930 for thread in threading.enumerate(): |
(...skipping 81 matching lines...) | |
986 if e.is_infra_error: | 1012 if e.is_infra_error: |
987 return constants.INFRA_EXIT_CODE | 1013 return constants.INFRA_EXIT_CODE |
988 return constants.ERROR_EXIT_CODE | 1014 return constants.ERROR_EXIT_CODE |
989 except: # pylint: disable=W0702 | 1015 except: # pylint: disable=W0702 |
990 logging.exception('Unrecognized error occurred.') | 1016 logging.exception('Unrecognized error occurred.') |
991 return constants.ERROR_EXIT_CODE | 1017 return constants.ERROR_EXIT_CODE |
992 | 1018 |
993 | 1019 |
994 if __name__ == '__main__': | 1020 if __name__ == '__main__': |
995 sys.exit(main()) | 1021 sys.exit(main()) |