Chromium Code Reviews

Unified Diff: build/android/test_runner.py

Issue 2511733004: [Android] Make the test runner terminate gracefully on SIGTERM. (Closed)
Patch Set: rnephew comment (created 4 years, 1 month ago)
--- a/build/android/test_runner.py
+++ b/build/android/test_runner.py
@@ -1,37 +1,39 @@
 #!/usr/bin/env python
 #
 # Copyright 2013 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 """Runs all types of tests from one unified interface."""
 
 import argparse
 import collections
+import contextlib
 import itertools
 import logging
 import os
 import signal
 import sys
 import threading
 import unittest
 
 import devil_chromium
 from devil import base_error
 from devil.android import device_blacklist
 from devil.android import device_errors
 from devil.android import device_utils
 from devil.android import forwarder
 from devil.android import ports
 from devil.utils import reraiser_thread
 from devil.utils import run_tests_helper
+from devil.utils import signal_handler
 
 from pylib import constants
 from pylib.base import base_test_result
 from pylib.base import environment_factory
 from pylib.base import test_dispatcher
 from pylib.base import test_instance_factory
 from pylib.base import test_run_factory
 from pylib.constants import host_paths
 from pylib.linker import setup as linker_setup
 from pylib.junit import setup as junit_setup
(...skipping 654 matching lines...)
@@ -692,95 +694,124 @@
 
 def RunTestsInPlatformMode(args):
 
   def infra_error(message):
     logging.fatal(message)
     sys.exit(constants.INFRA_EXIT_CODE)
 
   if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
     infra_error('%s is not yet supported in platform mode' % args.command)
 
-  with environment_factory.CreateEnvironment(args, infra_error) as env:
-    with test_instance_factory.CreateTestInstance(args, infra_error) as test:
-      with test_run_factory.CreateTestRun(
-          args, env, test, infra_error) as test_run:
-
-        # TODO(jbudorick): Rewrite results handling.
-
-        # all_raw_results is a list of lists of base_test_result.TestRunResults
-        # objects. Each instance of TestRunResults contains all test results
-        # produced by a single try, while each list of TestRunResults contains
-        # all tries in a single iteration.
-        all_raw_results = []
-        # all_iteration_results is a list of base_test_result.TestRunResults
-        # objects. Each instance of TestRunResults contains the last test result
-        # for each test run in that iteration.
-        all_iteration_results = []
-
-        repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
-                       else itertools.count())
-        result_counts = collections.defaultdict(
-            lambda: collections.defaultdict(int))
-        iteration_count = 0
-        for _ in repetitions:
-          raw_results = test_run.RunTests()
-          if not raw_results:
-            continue
-
-          all_raw_results.append(raw_results)
-
-          iteration_results = base_test_result.TestRunResults()
-          for r in reversed(raw_results):
-            iteration_results.AddTestRunResults(r)
-          all_iteration_results.append(iteration_results)
-
-          iteration_count += 1
-          for r in iteration_results.GetAll():
-            result_counts[r.GetName()][r.GetType()] += 1
-          report_results.LogFull(
-              results=iteration_results,
-              test_type=test.TestType(),
-              test_package=test_run.TestPackage(),
-              annotation=getattr(args, 'annotations', None),
-              flakiness_server=getattr(args, 'flakiness_dashboard_server',
-                                       None))
-          if args.break_on_failure and not iteration_results.DidRunPass():
-            break
-
-        if iteration_count > 1:
-          # display summary results
-          # only display results for a test if at least one test did not pass
-          all_pass = 0
-          tot_tests = 0
-          for test_name in result_counts:
-            tot_tests += 1
-            if any(result_counts[test_name][x] for x in (
-                base_test_result.ResultType.FAIL,
-                base_test_result.ResultType.CRASH,
-                base_test_result.ResultType.TIMEOUT,
-                base_test_result.ResultType.UNKNOWN)):
-              logging.critical(
-                  '%s: %s',
-                  test_name,
-                  ', '.join('%s %s' % (str(result_counts[test_name][i]), i)
-                            for i in base_test_result.ResultType.GetTypes()))
-            else:
-              all_pass += 1
-
-          logging.critical('%s of %s tests passed in all %s runs',
-                           str(all_pass),
-                           str(tot_tests),
-                           str(iteration_count))
-
-        if args.json_results_file:
-          json_results.GenerateJsonResultsFile(
-              all_raw_results, args.json_results_file)
+  ### Set up sigterm handler.
+
+  def unexpected_sigterm(_signum, _frame):
+    infra_error('Received SIGTERM. Shutting down.')
+
+  sigterm_handler = signal_handler.SignalHandler(
+      signal.SIGTERM, unexpected_sigterm)
+
+  ### Set up results handling.
+  # TODO(jbudorick): Rewrite results handling.
+
+  # all_raw_results is a list of lists of
+  # base_test_result.TestRunResults objects. Each instance of
+  # TestRunResults contains all test results produced by a single try,
+  # while each list of TestRunResults contains all tries in a single
+  # iteration.
+  all_raw_results = []
+
+  # all_iteration_results is a list of base_test_result.TestRunResults
+  # objects. Each instance of TestRunResults contains the last test
+  # result for each test run in that iteration.
+  all_iteration_results = []
+
+  @contextlib.contextmanager
+  def noop():
+    yield
+
+  json_writer = noop()
+  if args.json_results_file:
+    @contextlib.contextmanager
+    def write_json_file():
+      try:
+        yield
+      finally:
+        json_results.GenerateJsonResultsFile(
+            all_raw_results, args.json_results_file)
+
+    json_writer = write_json_file()
+
+  ### Set up test objects.
+
+  env = environment_factory.CreateEnvironment(args, infra_error)
+  test_instance = test_instance_factory.CreateTestInstance(args, infra_error)
+  test_run = test_run_factory.CreateTestRun(
+      args, env, test_instance, infra_error)
+
+  ### Run.
+
+  with sigterm_handler, json_writer, env, test_instance, test_run:
+
+    repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
+                   else itertools.count())
+    result_counts = collections.defaultdict(
+        lambda: collections.defaultdict(int))
+    iteration_count = 0
+    for _ in repetitions:
+      raw_results = test_run.RunTests()
+      if not raw_results:
+        continue
+
+      all_raw_results.append(raw_results)
+
+      iteration_results = base_test_result.TestRunResults()
+      for r in reversed(raw_results):
+        iteration_results.AddTestRunResults(r)
+      all_iteration_results.append(iteration_results)
+
+      iteration_count += 1
+      for r in iteration_results.GetAll():
+        result_counts[r.GetName()][r.GetType()] += 1
+      report_results.LogFull(
+          results=iteration_results,
+          test_type=test_instance.TestType(),
+          test_package=test_run.TestPackage(),
+          annotation=getattr(args, 'annotations', None),
+          flakiness_server=getattr(args, 'flakiness_dashboard_server',
+                                   None))
+      if args.break_on_failure and not iteration_results.DidRunPass():
+        break
+
+    if iteration_count > 1:
+      # display summary results
+      # only display results for a test if at least one test did not pass
+      all_pass = 0
+      tot_tests = 0
+      for test_name in result_counts:
+        tot_tests += 1
+        if any(result_counts[test_name][x] for x in (
+            base_test_result.ResultType.FAIL,
+            base_test_result.ResultType.CRASH,
+            base_test_result.ResultType.TIMEOUT,
+            base_test_result.ResultType.UNKNOWN)):
+          logging.critical(
+              '%s: %s',
+              test_name,
+              ', '.join('%s %s' % (str(result_counts[test_name][i]), i)
+                        for i in base_test_result.ResultType.GetTypes()))
+        else:
+          all_pass += 1
+
+      logging.critical('%s of %s tests passed in all %s runs',
+                       str(all_pass),
+                       str(tot_tests),
+                       str(iteration_count))
 
   if args.command == 'perf' and (args.steps or args.single_step):
     return 0
 
   return (0 if all(r.DidRunPass() for r in all_iteration_results)
           else constants.ERROR_EXIT_CODE)
 
 
 CommandConfigTuple = collections.namedtuple(
     'CommandConfigTuple',
(...skipping 50 matching lines...)
@@ -837,10 +868,10 @@
     if e.is_infra_error:
       return constants.INFRA_EXIT_CODE
     return constants.ERROR_EXIT_CODE
   except: # pylint: disable=W0702
     logging.exception('Unrecognized error occurred.')
     return constants.ERROR_EXIT_CODE
 
 
 if __name__ == '__main__':
   sys.exit(main())
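
Note: the new devil.utils.signal_handler module is imported above, but its implementation is not part of this diff. A minimal sketch of the pattern a scoped signal-handler context manager typically implements (the name scoped_signal_handler is hypothetical, not devil's actual API): install the handler on entry, restore the previous one on exit.

import contextlib
import signal


@contextlib.contextmanager
def scoped_signal_handler(signalnum, handler):
  """Installs |handler| for |signalnum| for the duration of the block."""
  previous = signal.signal(signalnum, handler)
  try:
    yield
  finally:
    # Restore the prior handler even when the block exits via an
    # exception, e.g. the SystemExit raised by infra_error().
    signal.signal(signalnum, previous)

A generator-based context manager like this runs no code until it is entered, which is why the patch can construct sigterm_handler eagerly and only activate it later in the combined `with` statement; the SIGTERM override never outlives the block.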
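The graceful shutdown itself falls out of how these context managers compose: unexpected_sigterm calls infra_error, which calls sys.exit, and sys.exit raises SystemExit in the main thread. As that exception unwinds the single `with` statement, write_json_file's finally clause writes whatever results have accumulated, and env, test_instance, and test_run still get their normal teardown. A self-contained sketch of that mechanism (all names are illustrative, and it is POSIX-only because it uses os.kill):

import contextlib
import json
import os
import signal
import sys

results = []


@contextlib.contextmanager
def json_writer(path):
  try:
    yield
  finally:
    # Runs on normal exit and when SystemExit unwinds the stack.
    with open(path, 'w') as f:
      json.dump(results, f)


def on_sigterm(_signum, _frame):
  sys.exit(1)  # Raises SystemExit at the point the signal interrupted.


if __name__ == '__main__':
  signal.signal(signal.SIGTERM, on_sigterm)
  with json_writer('results.json'):
    results.append('iteration 1')
    os.kill(os.getpid(), signal.SIGTERM)  # Simulate an external kill.
    results.append('never reached')  # CPython runs the handler first.

Running this leaves results.json containing only "iteration 1" and exits with status 1, mirroring how a SIGTERM mid-run still produces a valid JSON results file instead of killing the runner with no output.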