OLD | NEW |
1 # -*- coding: utf-8 -*- | 1 # -*- coding: utf-8 -*- |
2 # Copyright 2014 The Chromium Authors. All rights reserved. | 2 # Copyright 2014 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Encapsulates running tests defined in tests.py. | 6 """Encapsulates running tests defined in tests.py. |
7 | 7 |
8 Running this script requires passing --config-path with a path to a config file | 8 Running this script requires passing --config-path with a path to a config file |
9 of the following structure: | 9 of the following structure: |
10 | 10 |
(...skipping 17 matching lines...) |
28 | 28 |
29 The script uses Python's logging library to report the test results, | 29 The script uses Python's logging library to report the test results, |
30 as well as debugging information. It emits three levels of logs (in | 30 as well as debugging information. It emits three levels of logs (in |
31 descending order of severity): | 31 descending order of severity): |
32 logging.INFO: Summary of the tests. | 32 logging.INFO: Summary of the tests. |
33 logging.DEBUG: Details about test failures. | 33 logging.DEBUG: Details about test failures. |
34 SCRIPT_DEBUG (see below): Debug info of this script. | 34 SCRIPT_DEBUG (see below): Debug info of this script. |
35 You have to set up appropriate logging handlers to have the logs appear. | 35 You have to set up appropriate logging handlers to have the logs appear. |
36 """ | 36 """ |
37 | 37 |
| 38 import ConfigParser |
| 39 import Queue |
38 import argparse | 40 import argparse |
39 import ConfigParser | |
40 import logging | 41 import logging |
| 42 import multiprocessing |
41 import os | 43 import os |
42 import shutil | 44 import shutil |
43 import subprocess | 45 import stopit |
44 import tempfile | 46 import tempfile |
45 import time | 47 import time |
46 | 48 |
| 49 from threading import Thread |
| 50 from collections import defaultdict |
| 51 |
47 import tests | 52 import tests |
48 | 53 |
49 | 54 |
50 # Just below logging.DEBUG, use for this script's debug messages instead | 55 # Just below logging.DEBUG, use for this script's debug messages instead |
51 # of logging.DEBUG, which is already used for detailed test debug messages. | 56 # of logging.DEBUG, which is already used for detailed test debug messages. |
52 SCRIPT_DEBUG = 9 | 57 SCRIPT_DEBUG = 9 |
53 | 58 |
| 59 class Config(object): |
| 60 test_cases_to_run = tests.TEST_CASES |
| 61 save_only_fails = False |
| 62 tests_to_run = tests.all_tests.keys() |
| 63 max_tests_in_parallel = 1 |
54 | 64 |
55 class TestRunner(object): | 65 def __init__(self, config_path): |
56 """Runs tests for a single website.""" | 66 config = ConfigParser.ConfigParser() |
| 67 config.read(config_path) |
| 68 if config.has_option("run_options", "tests_in_parallel"): |
| 69 self.max_tests_in_parallel = config.getint( |
| 70 "run_options", "tests_in_parallel") |
57 | 71 |
58 def __init__(self, test_cmd, test_name): | 72 self.chrome_path = config.get("binaries", "chrome-path") |
59 """Initialize the TestRunner. | 73 self.chromedriver_path = config.get("binaries", "chromedriver-path") |
| 74 self.passwords_path = config.get("data_files", "passwords_path") |
60 | 75 |
61 Args: | 76 if config.has_option("run_options", "tests_to_run"): |
62 test_cmd: List of command line arguments to be supplied to | 77 self.tests_to_run = config.get("run_options", "tests_to_run").split(",") |
63 every test run. | |
64 test_name: Test name (e.g., facebook). | |
65 """ | |
66 self.logger = logging.getLogger("run_tests") | |
67 | 78 |
68 self.profile_path = tempfile.mkdtemp() | 79 if config.has_option("run_options", "test_cases_to_run"): |
69 results = tempfile.NamedTemporaryFile(delete=False) | 80 self.test_cases_to_run = config.get( |
70 self.results_path = results.name | 81 "run_options", "test_cases_to_run").split(",") |
71 results.close() | 82 if config.has_option("logging", "save-only-fails"): |
72 self.test_cmd = test_cmd + ["--profile-path", self.profile_path, | 83 self.save_only_fails = config.getboolean("logging", "save-only-fails") |
73 "--save-path", self.results_path] | |
74 self.test_name = test_name | |
75 # TODO(vabr): Ideally we would replace timeout with something allowing | |
76 # calling tests directly inside Python, and working on other platforms. | |
77 # | |
78 # The website test runs multiple scenarios, each of which has an internal |
79 # timeout of 200s for waiting (see |remaining_time_to_wait| and | |
80 # Wait() in websitetest.py). Expecting that not every scenario should | |
81 # take 200s, the maximum time allocated for all of them is 300s. | |
82 self.test_cmd = ["timeout", "300"] + self.test_cmd | |
83 | |
84 self.logger.log(SCRIPT_DEBUG, | |
85 "TestRunner set up for test %s, command '%s', " | |
86 "profile path %s, results file %s", | |
87 self.test_name, self.test_cmd, self.profile_path, | |
88 self.results_path) | |
89 | |
90 self.runner_process = None | |
91 # The tests can be flaky. This is why we try to rerun up to 3 times. | |
92 self.max_test_runs_left = 3 | |
93 self.failures = [] | |
94 self._run_test() | |
95 | |
96 def get_test_result(self): | |
97 """Return the test results. | |
98 | |
99 Returns: | |
100 (True, []) if the test passed. | |
101 (False, list_of_failures) if the test failed. | |
102 None if the test is still running. | |
103 """ | |
104 | |
105 test_running = self.runner_process and self.runner_process.poll() is None | |
106 if test_running: | |
107 return None | |
108 # The test is not running; now check whether to start it again. |
109 if self._check_if_test_passed(): | |
110 self.logger.log(SCRIPT_DEBUG, "Test %s passed", self.test_name) | |
111 return True, [] | |
112 if self.max_test_runs_left == 0: | |
113 self.logger.log(SCRIPT_DEBUG, "Test %s failed", self.test_name) | |
114 return False, self.failures | |
115 self._run_test() | |
116 return None | |
117 | |
118 def _check_if_test_passed(self): | |
119 """Returns True if and only if the test passed.""" | |
120 | |
121 success = False | |
122 if os.path.isfile(self.results_path): | |
123 with open(self.results_path, "r") as results: | |
124 # TODO(vabr): Parse the results to make sure all scenarios succeeded | |
125 # instead of hard-coding here the number of tests scenarios from | |
126 # test.py:main. | |
127 NUMBER_OF_TEST_SCENARIOS = 3 | |
128 passed_scenarios = 0 | |
129 for line in results: | |
130 self.failures.append(line) | |
131 passed_scenarios += line.count("successful='True'") | |
132 success = passed_scenarios == NUMBER_OF_TEST_SCENARIOS | |
133 if success: | |
134 break | |
135 | |
136 self.logger.log( | |
137 SCRIPT_DEBUG, | |
138 "Test run of {0} has succeeded: {1}".format(self.test_name, success)) | |
139 return success | |
140 | |
141 def _run_test(self): | |
142 """Executes the command to run the test.""" | |
143 with open(self.results_path, "w"): | |
144 pass # Just clear the results file. | |
145 shutil.rmtree(path=self.profile_path, ignore_errors=True) | |
146 self.max_test_runs_left -= 1 | |
147 self.logger.log(SCRIPT_DEBUG, "Run of test %s started", self.test_name) | |
148 self.runner_process = subprocess.Popen(self.test_cmd) | |
149 | 84 |
150 | 85 |
151 def _apply_defaults(config, defaults): | 86 def LogResultsOfTestRun(config, results): |
152 """Adds default values from |defaults| to |config|. | 87 """ Logs |results| of a test run. """ |
| 88 logger = logging.getLogger("run_tests") |
| 89 failed_tests = [] |
| 90 failed_tests_num = 0 |
| 91 for result in results: |
| 92 website, test_case, success, reason = result |
| 93 if not (config.save_only_fails and success): |
| 94 logger.debug("Test case %s has %s on website %s", test_case, |
| 95 {True: "passed", False: "failed"}[success], website) |
| 96 if not success: |
| 97 logger.debug("Reason for failure: %s", reason) |
153 | 98 |
154 Note: This differs from ConfigParser's mechanism for providing defaults in | 99 if not success: |
155 two aspects: | 100 failed_tests.append("%s.%s" % (website, test_case)) |
156 * The "defaults" here become explicit, and are associated with sections. | 101 failed_tests_num += 1 |
157 * Sections get created for the added defaults where needed, that is, if | |
158 they do not exist before. | |
159 | 102 |
160 Args: | 103 logger.info("%d failed test cases out of %d, failing test cases: %s", |
161 config: A ConfigParser instance to be updated | 104 failed_tests_num, len(results), |
162 defaults: A dictionary mapping (section_string, option_string) pairs | 105 sorted(failed_tests)) |
163 to string values. For every section/option combination not already | |
164 contained in |config|, the value from |defaults| is stored in |config|. | |
165 """ | |
166 for (section, option) in defaults: | |
167 if not config.has_section(section): | |
168 config.add_section(section) | |
169 if not config.has_option(section, option): | |
170 config.set(section, option, defaults[(section, option)]) | |
171 | 106 |
172 | 107 |
173 def run_tests(config_path): | 108 def RunTestCaseOnWebsite((website, test_case, config)): |
| 109 """ Runs a |test_case| on a |website|. In case when |test_case| has |
| 110 failed it tries to rerun it. If run takes too long, then it is stopped. |
| 111 """ |
| 112 |
| 113 profile_path = tempfile.mkdtemp() |
| 114 # The tests can be flaky. This is why we try to rerun up to 3 times. |
| 115 attempts = 3 |
| 116 result = ("", "", False, "") |
| 117 logger = logging.getLogger("run_tests") |
| 118 for _ in xrange(attempts): |
| 119 shutil.rmtree(path=profile_path, ignore_errors=True) |
| 120 logger.log(SCRIPT_DEBUG, "Run of test case %s of website %s started", |
| 121 test_case, website) |
| 122 try: |
| 123 with stopit.ThreadingTimeout(100) as timeout: |
| 124 logger.log(SCRIPT_DEBUG, |
| 125 "Run test with parameters: %s %s %s %s %s %s", |
| 126 config.chrome_path, config.chromedriver_path, |
| 127 profile_path, config.passwords_path, |
| 128 website, test_case) |
| 129 result = tests.RunTest(config.chrome_path, config.chromedriver_path, |
| 130 profile_path, config.passwords_path, |
| 131 website, test_case)[0] |
| 132 if timeout.state != timeout.EXECUTED: |
| 133 result = (website, test_case, False, "Timeout") |
| 134 _, _, success, _ = result |
| 135 if success: |
| 136 return result |
| 137 except Exception as e: |
| 138 result = (website, test_case, False, e) |
| 139 return result |
| 140 |
| 141 |
| 142 def RunTests(config_path): |
174 """Runs automated tests. | 143 """Runs automated tests. |
175 | 144 |
176 Runs the tests and returns the results through logging: | 145 Runs the tests and returns the results through logging: |
177 On logging.INFO logging level, it returns the summary of how many tests | 146 On logging.INFO logging level, it returns the summary of how many tests |
178 passed and failed. | 147 passed and failed. |
179 On logging.DEBUG logging level, it returns the failure logs, if any. | 148 On logging.DEBUG logging level, it returns the failure logs, if any. |
180 (On SCRIPT_DEBUG it returns diagnostics for this script.) | 149 (On SCRIPT_DEBUG it returns diagnostics for this script.) |
181 | 150 |
182 Args: | 151 Args: |
183 config_path: The path to the config INI file. See the top of the file | 152 config_path: The path to the config INI file. See the top of the file |
184 for format description. | 153 for format description. |
185 """ | 154 """ |
186 def has_test_run_finished(runner, result): | 155 config = Config(config_path) |
187 result = runner.get_test_result() | |
188 if result: # This test run is finished. | |
189 status, log = result | |
190 results.append((runner.test_name, status, log)) | |
191 return True | |
192 else: | |
193 return False | |
194 | |
195 defaults = {("run_options", "tests_in_parallel"): "1"} | |
196 config = ConfigParser.ConfigParser() | |
197 _apply_defaults(config, defaults) | |
198 config.read(config_path) | |
199 max_tests_in_parallel = config.getint("run_options", "tests_in_parallel") | |
200 full_path = os.path.realpath(__file__) | |
201 tests_dir = os.path.dirname(full_path) | |
202 tests_path = os.path.join(tests_dir, "tests.py") | |
203 test_name_idx = 2 # Index of "test_name_placeholder" below. | |
204 general_test_cmd = ["python", tests_path, "test_name_placeholder", | |
205 "--chrome-path", config.get("binaries", "chrome-path"), | |
206 "--chromedriver-path", | |
207 config.get("binaries", "chromedriver-path"), | |
208 "--passwords-path", | |
209 config.get("data_files", "passwords_path")] | |
210 runners = [] | |
211 if config.has_option("run_options", "tests_to_run"): | |
212 tests_to_run = config.get("run_options", "tests_to_run").split(",") | |
213 else: | |
214 tests_to_run = tests.all_tests.keys() | |
215 if (config.has_option("logging", "save-only-failures") and | |
216 config.getboolean("logging", "save-only-failures")): | |
217 general_test_cmd.append("--save-only-failures") | |
218 | |
219 if config.has_option("run_options", "test_cases_to_run"): | |
220 general_test_cmd += ["--test-cases-to-run", | |
221 config.get("run_options", "test_cases_to_run").replace(",", " ")] | |
222 | |
223 logger = logging.getLogger("run_tests") | 156 logger = logging.getLogger("run_tests") |
224 logger.log(SCRIPT_DEBUG, "%d tests to run: %s", len(tests_to_run), | 157 logger.log(SCRIPT_DEBUG, "%d tests to run: %s", len(config.tests_to_run), |
225 tests_to_run) | 158 config.tests_to_run) |
226 results = [] # List of (name, bool_passed, failure_log). | 159 data = [(website, test_case, config) |
227 while len(runners) + len(tests_to_run) > 0: | 160 for website in config.tests_to_run |
228 runners = [runner for runner in runners if not has_test_run_finished( | 161 for test_case in config.test_cases_to_run] |
229 runner, results)] | 162 number_of_processes = min(config.max_tests_in_parallel, |
230 while len(runners) < max_tests_in_parallel and len(tests_to_run): | 163 len(config.test_cases_to_run) * |
231 test_name = tests_to_run.pop() | 164 len(config.tests_to_run)) |
232 specific_test_cmd = list(general_test_cmd) | 165 p = multiprocessing.Pool(number_of_processes) |
233 specific_test_cmd[test_name_idx] = test_name | 166 results = p.map(RunTestCaseOnWebsite, data) |
234 runners.append(TestRunner(specific_test_cmd, test_name)) | 167 p.close() |
235 time.sleep(1) | 168 p.join() |
236 failed_tests = [(name, log) for (name, passed, log) in results if not passed] | 169 LogResultsOfTestRun(config, results) |
237 logger.info("%d failed tests out of %d, failing tests: %s", | |
238 len(failed_tests), len(results), | |
239 [name for (name, _) in failed_tests]) | |
240 logger.debug("Logs of failing tests: %s", failed_tests) | |
241 | 170 |
242 | 171 |
243 def main(): | 172 def main(): |
244 parser = argparse.ArgumentParser() | 173 parser = argparse.ArgumentParser() |
245 parser.add_argument("config_path", metavar="N", | 174 parser.add_argument("config_path", metavar="N", |
246 help="Path to the config.ini file.") | 175 help="Path to the config.ini file.") |
247 args = parser.parse_args() | 176 args = parser.parse_args() |
248 run_tests(args.config_path) | 177 RunTests(args.config_path) |
249 | 178 |
250 | 179 |
251 if __name__ == "__main__": | 180 if __name__ == "__main__": |
252 main() | 181 main() |
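
A minimal sketch (not part of the patch) of driving the new entry point directly, assuming the module is importable as run_tests and a config.ini exists; the paths and the "facebook" entry are placeholder values, and only options actually read by the Config class above are shown:

# -*- coding: utf-8 -*-
# Example config.ini (placeholder paths; the optional options are omitted):
#   [binaries]
#   chrome-path = /path/to/chrome
#   chromedriver-path = /path/to/chromedriver
#   [data_files]
#   passwords_path = /path/to/passwords_file
#   [run_options]
#   tests_in_parallel = 2
#   tests_to_run = facebook
#   [logging]
#   save-only-fails = true

import logging

import run_tests  # assumes this script is saved as run_tests.py

if __name__ == "__main__":
  # All output goes through the "run_tests" logger, so a handler must be
  # attached for any of it to appear (see the module docstring).
  logger = logging.getLogger("run_tests")
  logger.setLevel(run_tests.SCRIPT_DEBUG)  # also show the script's own debug logs
  logger.addHandler(logging.StreamHandler())
  run_tests.RunTests("config.ini")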