Chromium Code Reviews

Unified Diff: build/android/test_runner.py

Issue 2511243005: Revert of [Android] Make the test runner terminate gracefully on SIGTERM. (Closed)
Patch Set: Created 4 years, 1 month ago
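
This revert removes the SIGTERM-handler setup (devil.utils.signal_handler) and the contextlib-based json_writer from RunTestsInPlatformMode, restoring the nested with-statement structure and an explicit JSON write after the run loop. As a minimal sketch of the pattern being reverted out, the following scopes a signal handler to a block; scoped_signal_handler is a hypothetical stand-in for devil's signal_handler.SignalHandler, whose real implementation may differ:

# Sketch only: an illustration of the reverted SIGTERM-scoping pattern.
# scoped_signal_handler is hypothetical; the reverted CL used
# devil.utils.signal_handler.SignalHandler with the same shape.
import contextlib
import signal


@contextlib.contextmanager
def scoped_signal_handler(signalnum, handler):
  # Install |handler| for |signalnum|, restoring the previous handler on exit.
  previous = signal.signal(signalnum, handler)
  try:
    yield
  finally:
    signal.signal(signalnum, previous)


def _unexpected_sigterm(_signum, _frame):
  raise SystemExit('Received SIGTERM. Shutting down.')


# While the block is active, a delivered SIGTERM raises SystemExit instead of
# killing the process abruptly, so finally blocks and __exit__ methods run.
with scoped_signal_handler(signal.SIGTERM, _unexpected_sigterm):
  pass  # run tests here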
--- a/build/android/test_runner.py
+++ b/build/android/test_runner.py
@@ -1,39 +1,37 @@
 #!/usr/bin/env python
 #
 # Copyright 2013 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 """Runs all types of tests from one unified interface."""
 
 import argparse
 import collections
-import contextlib
 import itertools
 import logging
 import os
 import signal
 import sys
 import threading
 import unittest
 
 import devil_chromium
 from devil import base_error
 from devil.android import device_blacklist
 from devil.android import device_errors
 from devil.android import device_utils
 from devil.android import forwarder
 from devil.android import ports
 from devil.utils import reraiser_thread
 from devil.utils import run_tests_helper
-from devil.utils import signal_handler
 
 from pylib import constants
 from pylib.base import base_test_result
 from pylib.base import environment_factory
 from pylib.base import test_dispatcher
 from pylib.base import test_instance_factory
 from pylib.base import test_run_factory
 from pylib.constants import host_paths
 from pylib.linker import setup as linker_setup
 from pylib.junit import setup as junit_setup
(...skipping 654 matching lines...)
@@ -694,124 +692,95 @@
 
 def RunTestsInPlatformMode(args):
 
   def infra_error(message):
     logging.fatal(message)
     sys.exit(constants.INFRA_EXIT_CODE)
 
   if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
     infra_error('%s is not yet supported in platform mode' % args.command)
 
-  ### Set up sigterm handler.
-
-  def unexpected_sigterm(_signum, _frame):
-    infra_error('Received SIGTERM. Shutting down.')
-
-  sigterm_handler = signal_handler.SignalHandler(
-      signal.SIGTERM, unexpected_sigterm)
-
-  ### Set up results handling.
-  # TODO(jbudorick): Rewrite results handling.
-
-  # all_raw_results is a list of lists of
-  # base_test_result.TestRunResults objects. Each instance of
-  # TestRunResults contains all test results produced by a single try,
-  # while each list of TestRunResults contains all tries in a single
-  # iteration.
-  all_raw_results = []
-
-  # all_iteration_results is a list of base_test_result.TestRunResults
-  # objects. Each instance of TestRunResults contains the last test
-  # result for each test run in that iteration.
-  all_iteration_results = []
-
-  @contextlib.contextmanager
-  def noop():
-    yield
-
-  json_writer = noop()
-  if args.json_results_file:
-    @contextlib.contextmanager
-    def write_json_file():
-      try:
-        yield
-      finally:
-        json_results.GenerateJsonResultsFile(
-            all_raw_results, args.json_results_file)
-
-    json_writer = write_json_file()
-
-  ### Set up test objects.
-
-  env = environment_factory.CreateEnvironment(args, infra_error)
-  test_instance = test_instance_factory.CreateTestInstance(args, infra_error)
-  test_run = test_run_factory.CreateTestRun(
-      args, env, test_instance, infra_error)
-
-  ### Run.
-
-  with sigterm_handler, json_writer, env, test_instance, test_run:
-
-    repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
-                   else itertools.count())
-    result_counts = collections.defaultdict(
-        lambda: collections.defaultdict(int))
-    iteration_count = 0
-    for _ in repetitions:
-      raw_results = test_run.RunTests()
-      if not raw_results:
-        continue
-
-      all_raw_results.append(raw_results)
-
-      iteration_results = base_test_result.TestRunResults()
-      for r in reversed(raw_results):
-        iteration_results.AddTestRunResults(r)
-      all_iteration_results.append(iteration_results)
-
-      iteration_count += 1
-      for r in iteration_results.GetAll():
-        result_counts[r.GetName()][r.GetType()] += 1
-      report_results.LogFull(
-          results=iteration_results,
-          test_type=test_instance.TestType(),
-          test_package=test_run.TestPackage(),
-          annotation=getattr(args, 'annotations', None),
-          flakiness_server=getattr(args, 'flakiness_dashboard_server',
-                                   None))
-      if args.break_on_failure and not iteration_results.DidRunPass():
-        break
-
-    if iteration_count > 1:
-      # display summary results
-      # only display results for a test if at least one test did not pass
-      all_pass = 0
-      tot_tests = 0
-      for test_name in result_counts:
-        tot_tests += 1
-        if any(result_counts[test_name][x] for x in (
-            base_test_result.ResultType.FAIL,
-            base_test_result.ResultType.CRASH,
-            base_test_result.ResultType.TIMEOUT,
-            base_test_result.ResultType.UNKNOWN)):
-          logging.critical(
-              '%s: %s',
-              test_name,
-              ', '.join('%s %s' % (str(result_counts[test_name][i]), i)
-                        for i in base_test_result.ResultType.GetTypes()))
-        else:
-          all_pass += 1
-
-      logging.critical('%s of %s tests passed in all %s runs',
-                       str(all_pass),
-                       str(tot_tests),
-                       str(iteration_count))
+  with environment_factory.CreateEnvironment(args, infra_error) as env:
+    with test_instance_factory.CreateTestInstance(args, infra_error) as test:
+      with test_run_factory.CreateTestRun(
+          args, env, test, infra_error) as test_run:
+
+        # TODO(jbudorick): Rewrite results handling.
+
+        # all_raw_results is a list of lists of base_test_result.TestRunResults
+        # objects. Each instance of TestRunResults contains all test results
+        # produced by a single try, while each list of TestRunResults contains
+        # all tries in a single iteration.
+        all_raw_results = []
+        # all_iteration_results is a list of base_test_result.TestRunResults
+        # objects. Each instance of TestRunResults contains the last test result
+        # for each test run in that iteration.
+        all_iteration_results = []
+
+        repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
+                       else itertools.count())
+        result_counts = collections.defaultdict(
+            lambda: collections.defaultdict(int))
+        iteration_count = 0
+        for _ in repetitions:
+          raw_results = test_run.RunTests()
+          if not raw_results:
+            continue
+
+          all_raw_results.append(raw_results)
+
+          iteration_results = base_test_result.TestRunResults()
+          for r in reversed(raw_results):
+            iteration_results.AddTestRunResults(r)
+          all_iteration_results.append(iteration_results)
+
+          iteration_count += 1
+          for r in iteration_results.GetAll():
+            result_counts[r.GetName()][r.GetType()] += 1
+          report_results.LogFull(
+              results=iteration_results,
+              test_type=test.TestType(),
+              test_package=test_run.TestPackage(),
+              annotation=getattr(args, 'annotations', None),
+              flakiness_server=getattr(args, 'flakiness_dashboard_server',
+                                       None))
+          if args.break_on_failure and not iteration_results.DidRunPass():
+            break
+
+        if iteration_count > 1:
+          # display summary results
+          # only display results for a test if at least one test did not pass
+          all_pass = 0
+          tot_tests = 0
+          for test_name in result_counts:
+            tot_tests += 1
+            if any(result_counts[test_name][x] for x in (
+                base_test_result.ResultType.FAIL,
+                base_test_result.ResultType.CRASH,
+                base_test_result.ResultType.TIMEOUT,
+                base_test_result.ResultType.UNKNOWN)):
+              logging.critical(
+                  '%s: %s',
+                  test_name,
+                  ', '.join('%s %s' % (str(result_counts[test_name][i]), i)
+                            for i in base_test_result.ResultType.GetTypes()))
+            else:
+              all_pass += 1
+
+          logging.critical('%s of %s tests passed in all %s runs',
+                           str(all_pass),
+                           str(tot_tests),
+                           str(iteration_count))
+
+        if args.json_results_file:
+          json_results.GenerateJsonResultsFile(
+              all_raw_results, args.json_results_file)
 
   if args.command == 'perf' and (args.steps or args.single_step):
     return 0
 
   return (0 if all(r.DidRunPass() for r in all_iteration_results)
           else constants.ERROR_EXIT_CODE)
 
 
 CommandConfigTuple = collections.namedtuple(
     'CommandConfigTuple',
(...skipping 50 matching lines...)
@@ -868,10 +837,10 @@
     if e.is_infra_error:
       return constants.INFRA_EXIT_CODE
     return constants.ERROR_EXIT_CODE
   except:  # pylint: disable=W0702
     logging.exception('Unrecognized error occurred.')
     return constants.ERROR_EXIT_CODE
 
 
 if __name__ == '__main__':
   sys.exit(main())
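
The removed json_writer shows a conditional-cleanup idiom: choose either a no-op context manager or a real one at setup time, then enter whichever was chosen in a single flat with statement (a flat `with a, b:` is equivalent to nesting `with a:` then `with b:`, which is the form this revert restores). A self-contained sketch of that idiom with hypothetical names; write_text_on_exit stands in for the reverted write_json_file, which instead calls json_results.GenerateJsonResultsFile:

# Sketch of the conditional context-manager pattern from the reverted code.
import contextlib


@contextlib.contextmanager
def noop():
  yield


@contextlib.contextmanager
def write_text_on_exit(path, get_text):
  # Run the body first; write the file even if the body raised.
  try:
    yield
  finally:
    with open(path, 'w') as f:
      f.write(get_text())


results = []
json_path = None  # e.g. taken from --json-results-file
writer = (write_text_on_exit(json_path, lambda: repr(results))
          if json_path else noop())

# Cleanup happens only when a path was configured; otherwise noop() does
# nothing on exit.
with writer:
  results.append('result')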
OLDNEW
« no previous file with comments | « build/android/pylib/local/device/local_device_test_run.py ('k') | no next file » | no next file with comments »
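
Both sides of the diff tally results with a nested collections.defaultdict, so new test names and result types start counting from zero without explicit key checks; the summary then treats a test as passing only if it never recorded a failing type. A small sketch of that counting scheme, using placeholder strings for the base_test_result.ResultType values:

import collections

# test name -> result type -> count; missing keys default to 0.
result_counts = collections.defaultdict(
    lambda: collections.defaultdict(int))

for name, result_type in [('TestA', 'PASS'), ('TestA', 'FAIL'),
                          ('TestB', 'PASS')]:
  result_counts[name][result_type] += 1

# A test "passed in all runs" if it never recorded a failing type.
failing = ('FAIL', 'CRASH', 'TIMEOUT', 'UNKNOWN')
all_pass = [n for n in result_counts
            if not any(result_counts[n][t] for t in failing)]
assert all_pass == ['TestB']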
