Chromium Code Reviews

Unified Diff: build/android/test_runner.py

Issue 2511733004: [Android] Make the test runner terminate gracefully on SIGTERM. (Closed)
Patch Set: Created 4 years, 1 month ago
 #!/usr/bin/env python
 #
 # Copyright 2013 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Runs all types of tests from one unified interface."""

 import argparse
 import collections
+import contextlib
 import itertools
 import logging
 import os
 import signal
 import sys
 import threading
 import unittest

 import devil_chromium
 from devil import base_error
 from devil.android import device_blacklist
 from devil.android import device_errors
 from devil.android import device_utils
 from devil.android import forwarder
 from devil.android import ports
 from devil.utils import reraiser_thread
 from devil.utils import run_tests_helper
+from devil.utils import signal_handler

 from pylib import constants
 from pylib.base import base_test_result
 from pylib.base import environment_factory
 from pylib.base import test_dispatcher
 from pylib.base import test_instance_factory
 from pylib.base import test_run_factory
 from pylib.constants import host_paths
 from pylib.linker import setup as linker_setup
 from pylib.junit import setup as junit_setup
(...skipping 654 matching lines...)
 def RunTestsInPlatformMode(args):

   def infra_error(message):
     logging.fatal(message)
     sys.exit(constants.INFRA_EXIT_CODE)

   if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
     infra_error('%s is not yet supported in platform mode' % args.command)

-  with environment_factory.CreateEnvironment(args, infra_error) as env:
-    with test_instance_factory.CreateTestInstance(args, infra_error) as test:
-      with test_run_factory.CreateTestRun(
-          args, env, test, infra_error) as test_run:
-
-        # TODO(jbudorick): Rewrite results handling.
-
-        # all_raw_results is a list of lists of base_test_result.TestRunResults
-        # objects. Each instance of TestRunResults contains all test results
-        # produced by a single try, while each list of TestRunResults contains
-        # all tries in a single iteration.
-        all_raw_results = []
-        # all_iteration_results is a list of base_test_result.TestRunResults
-        # objects. Each instance of TestRunResults contains the last test result
-        # for each test run in that iteration.
-        all_iteration_results = []
-
-        repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
-                       else itertools.count())
-        result_counts = collections.defaultdict(
-            lambda: collections.defaultdict(int))
-        iteration_count = 0
-        for _ in repetitions:
-          raw_results = test_run.RunTests()
-          if not raw_results:
-            continue
-
-          all_raw_results.append(raw_results)
-
-          iteration_results = base_test_result.TestRunResults()
-          for r in reversed(raw_results):
-            iteration_results.AddTestRunResults(r)
-          all_iteration_results.append(iteration_results)
-
-          iteration_count += 1
-          for r in iteration_results.GetAll():
-            result_counts[r.GetName()][r.GetType()] += 1
-          report_results.LogFull(
-              results=iteration_results,
-              test_type=test.TestType(),
-              test_package=test_run.TestPackage(),
-              annotation=getattr(args, 'annotations', None),
-              flakiness_server=getattr(args, 'flakiness_dashboard_server',
-                                       None))
-          if args.break_on_failure and not iteration_results.DidRunPass():
-            break
-
-        if iteration_count > 1:
-          # display summary results
-          # only display results for a test if at least one test did not pass
-          all_pass = 0
-          tot_tests = 0
-          for test_name in result_counts:
-            tot_tests += 1
-            if any(result_counts[test_name][x] for x in (
-                base_test_result.ResultType.FAIL,
-                base_test_result.ResultType.CRASH,
-                base_test_result.ResultType.TIMEOUT,
-                base_test_result.ResultType.UNKNOWN)):
-              logging.critical(
-                  '%s: %s',
-                  test_name,
-                  ', '.join('%s %s' % (str(result_counts[test_name][i]), i)
-                            for i in base_test_result.ResultType.GetTypes()))
-            else:
-              all_pass += 1
-
-          logging.critical('%s of %s tests passed in all %s runs',
-                           str(all_pass),
-                           str(tot_tests),
-                           str(iteration_count))
-
-        if args.json_results_file:
-          json_results.GenerateJsonResultsFile(
-              all_raw_results, args.json_results_file)
+  ### Set up sigterm handler.
+
+  def unexpected_sigterm(_signum, _frame):
+    infra_error('Received SIGTERM. Shutting down.')
+
+  sigterm_handler = signal_handler.SignalHandler(
+      signal.SIGTERM, unexpected_sigterm)
+
+  ### Set up results handling.
+  # TODO(jbudorick): Rewrite results handling.
+
+  # all_raw_results is a list of lists of
+  # base_test_result.TestRunResults objects. Each instance of
+  # TestRunResults contains all test results produced by a single try,
+  # while each list of TestRunResults contains all tries in a single
+  # iteration.
+  all_raw_results = []
+
+  # all_iteration_results is a list of base_test_result.TestRunResults
+  # objects. Each instance of TestRunResults contains the last test
+  # result for each test run in that iteration.
+  all_iteration_results = []
+
+  @contextlib.contextmanager
+  def noop():
+    yield
+
+  json_writer = noop()
+  if args.json_results_file:
+    @contextlib.contextmanager
+    def write_json_file():
+      try:
+        yield
+      finally:
+        json_results.GenerateJsonResultsFile(
+            all_raw_results, args.json_results_file)
+
+    json_writer = write_json_file()
+
+  ### Set up test objects.
+
+  env = environment_factory.CreateEnvironment(args, infra_error)
+  test = test_instance_factory.CreateTestInstance(args, infra_error)
rnephew (Reviews Here) 2016/11/17 19:14:24 Maybe test_instance instead of just test, since th
jbudorick 2016/11/18 16:17:55 Done.
+  test_run = test_run_factory.CreateTestRun(args, env, test, infra_error)
+
+  ### Run.
+
+  with sigterm_handler, json_writer, env, test, test_run:
+
+    repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
+                   else itertools.count())
+    result_counts = collections.defaultdict(
+        lambda: collections.defaultdict(int))
+    iteration_count = 0
+    for _ in repetitions:
+      raw_results = test_run.RunTests()
+      if not raw_results:
+        continue
+
+      all_raw_results.append(raw_results)
+
+      iteration_results = base_test_result.TestRunResults()
+      for r in reversed(raw_results):
+        iteration_results.AddTestRunResults(r)
+      all_iteration_results.append(iteration_results)
+
+      iteration_count += 1
+      for r in iteration_results.GetAll():
+        result_counts[r.GetName()][r.GetType()] += 1
+      report_results.LogFull(
+          results=iteration_results,
+          test_type=test.TestType(),
+          test_package=test_run.TestPackage(),
+          annotation=getattr(args, 'annotations', None),
+          flakiness_server=getattr(args, 'flakiness_dashboard_server',
+                                   None))
+      if args.break_on_failure and not iteration_results.DidRunPass():
+        break
+
+    if iteration_count > 1:
+      # display summary results
+      # only display results for a test if at least one test did not pass
+      all_pass = 0
+      tot_tests = 0
+      for test_name in result_counts:
+        tot_tests += 1
+        if any(result_counts[test_name][x] for x in (
+            base_test_result.ResultType.FAIL,
+            base_test_result.ResultType.CRASH,
+            base_test_result.ResultType.TIMEOUT,
+            base_test_result.ResultType.UNKNOWN)):
+          logging.critical(
+              '%s: %s',
+              test_name,
+              ', '.join('%s %s' % (str(result_counts[test_name][i]), i)
+                        for i in base_test_result.ResultType.GetTypes()))
+        else:
+          all_pass += 1
+
+      logging.critical('%s of %s tests passed in all %s runs',
+                       str(all_pass),
+                       str(tot_tests),
+                       str(iteration_count))

   if args.command == 'perf' and (args.steps or args.single_step):
     return 0

   return (0 if all(r.DidRunPass() for r in all_iteration_results)
           else constants.ERROR_EXIT_CODE)


 CommandConfigTuple = collections.namedtuple(
     'CommandConfigTuple',
(...skipping 50 matching lines...)
     if e.is_infra_error:
       return constants.INFRA_EXIT_CODE
     return constants.ERROR_EXIT_CODE
   except: # pylint: disable=W0702
     logging.exception('Unrecognized error occurred.')
     return constants.ERROR_EXIT_CODE


 if __name__ == '__main__':
   sys.exit(main())
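
The patch leans on signal_handler.SignalHandler being usable as a context manager: entering it installs unexpected_sigterm as the SIGTERM handler, and exiting restores whatever handler was registered before, so the override only lasts for the duration of the run. A minimal sketch of that pattern, assuming an interface like the one devil/utils/signal_handler.py exposes (an illustration, not devil's actual implementation):

import signal


class SignalHandler(object):
  """Installs |handler| for |signalnum| on entry; restores the old one on exit.

  Minimal sketch of the context-manager pattern the patch relies on.
  """

  def __init__(self, signalnum, handler):
    self._signalnum = signalnum
    self._handler = handler
    self._previous = None

  def __enter__(self):
    # signal.signal returns the previously registered handler.
    self._previous = signal.signal(self._signalnum, self._handler)
    return self

  def __exit__(self, *_args):
    signal.signal(self._signalnum, self._previous)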
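The json_writer indirection is what makes the shutdown graceful in practice: infra_error() calls sys.exit(), so the SIGTERM handler raises SystemExit inside the with block, and write_json_file's finally clause still runs as the stack unwinds, flushing whatever results were collected before the signal arrived. A self-contained sketch of the same try/finally pattern, using a hypothetical write_results_on_exit stand-in and json.dump in place of json_results.GenerateJsonResultsFile:

import contextlib
import json


@contextlib.contextmanager
def write_results_on_exit(results, path):
  # The finally clause runs even when the body raises (here, the
  # SystemExit that infra_error() triggers), so the results collected
  # so far still reach disk.
  try:
    yield
  finally:
    with open(path, 'w') as f:
      json.dump(results, f)


results = []
try:
  with write_results_on_exit(results, '/tmp/results.json'):
    results.append({'test1': 'PASS'})
    raise SystemExit(1)  # simulate a SIGTERM arriving mid-run
except SystemExit:
  pass  # /tmp/results.json was still written during unwinding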
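The flattened with statement also carries ordering semantics: contexts are entered left to right and exited right to left, so the SIGTERM handler is installed before anything else and removed last, and the results file is written only after test_run, test, and env have finished their own cleanup. A toy demonstration of that ordering (the names mirror the patch, but the context managers are dummies):

import contextlib


@contextlib.contextmanager
def tracked(name):
  # Prints enter/exit events to make the ordering visible.
  print('enter %s' % name)
  try:
    yield
  finally:
    print('exit %s' % name)


with tracked('sigterm_handler'), tracked('json_writer'), tracked('env'), \
    tracked('test'), tracked('test_run'):
  print('running tests')

# Output: enters in the order listed, then exits test_run, test, env,
# json_writer, and finally sigterm_handler.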
