Chromium Code Reviews

Side by Side Diff: components/test/data/password_manager/automated_tests/run_tests.py

Issue 1089383002: [Password manager tests automation] Refactor test_runner. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@base
Patch Set: Created 5 years, 8 months ago
 # -*- coding: utf-8 -*-
 # Copyright 2014 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Encapsulates running tests defined in tests.py.

 Running this script requires passing --config-path with a path to a config file
 of the following structure:

(...skipping 13 matching lines...)

 The script uses the Python's logging library to report the test results,
 as well as debugging information. It emits three levels of logs (in
 descending order of severity):
   logging.INFO: Summary of the tests.
   logging.DEBUG: Details about tests failures.
   SCRIPT_DEBUG (see below): Debug info of this script.
 You have to set up appropriate logging handlers to have the logs appear.
 """
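
Note: the thirteen skipped docstring lines spell out the expected INI structure. Going only by the options that the new Config class in this patch actually reads, a config file of roughly this shape should be accepted (a sketch; all paths and values are hypothetical, "facebook" is the example test name from the old docstring):

    [binaries]
    chrome-path = /path/to/chrome
    chromedriver-path = /path/to/chromedriver

    [data_files]
    passwords_path = /path/to/passwords

    [run_options]
    tests_in_parallel = 2
    tests_to_run = facebook,google
    test_cases_to_run = PromptTest

    [logging]
    save-only-fails = true
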
+import ConfigParser
+import Queue
 import argparse
-import ConfigParser
 import logging
+import multiprocessing
 import os
 import shutil
-import subprocess
+import stopit
 import tempfile
 import time

+from threading import Thread
+from collections import defaultdict
+
 import tests


 # Just below logging.DEBUG, use for this script's debug messages instead
 # of logging.DEBUG, which is already used for detailed test debug messages.
 SCRIPT_DEBUG = 9

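
Note: since SCRIPT_DEBUG (9) sits below logging.DEBUG (10), the script's own diagnostics stay invisible until a handler and level are configured, as the module docstring warns. A minimal sketch (the logger name "run_tests" is the one used throughout this file):

    import logging
    logger = logging.getLogger("run_tests")
    logger.setLevel(9)                           # i.e. SCRIPT_DEBUG
    logger.addHandler(logging.StreamHandler())   # emit records to stderr
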
+class Config:
+  test_cases_to_run = tests.TEST_CASES
+  save_only_fails = False
+  tests_to_run = tests.all_tests.keys()
+  max_tests_in_parallel = 1

-class TestRunner(object):
-  """Runs tests for a single website."""
+  def __init__(self, config_path):
+    config = ConfigParser.ConfigParser()
+    config.read(config_path)
+    if config.has_option("run_options", "tests_in_parallel"):
+      self.max_tests_in_parallel = config.getint(
+          "run_options", "tests_in_parallel")

-  def __init__(self, test_cmd, test_name):
-    """Initialize the TestRunner.
+    self.chrome_path = config.get("binaries", "chrome-path")
+    self.chromedriver_path = config.get("binaries", "chromedriver-path")
+    self.passwords_path = config.get("data_files", "passwords_path")

-    Args:
-      test_cmd: List of command line arguments to be supplied to
-        every test run.
-      test_name: Test name (e.g., facebook).
-    """
-    self.logger = logging.getLogger("run_tests")
+    if config.has_option("run_options", "tests_to_run"):
+      self.tests_to_run = config.get("run_options", "tests_to_run").split(",")

-    self.profile_path = tempfile.mkdtemp()
-    results = tempfile.NamedTemporaryFile(delete=False)
-    self.results_path = results.name
-    results.close()
-    self.test_cmd = test_cmd + ["--profile-path", self.profile_path,
-                                "--save-path", self.results_path]
-    self.test_name = test_name
-    # TODO(vabr): Ideally we would replace timeout with something allowing
-    # calling tests directly inside Python, and working on other platforms.
-    #
-    # The website test runs multiple scenarios, each one has an internal
-    # timeout of 200s for waiting (see |remaining_time_to_wait| and
-    # Wait() in websitetest.py). Expecting that not every scenario should
-    # take 200s, the maximum time allocated for all of them is 300s.
-    self.test_cmd = ["timeout", "300"] + self.test_cmd
-
-    self.logger.log(SCRIPT_DEBUG,
-                    "TestRunner set up for test %s, command '%s', "
-                    "profile path %s, results file %s",
-                    self.test_name, self.test_cmd, self.profile_path,
-                    self.results_path)
-
-    self.runner_process = None
-    # The tests can be flaky. This is why we try to rerun up to 3 times.
-    self.max_test_runs_left = 3
-    self.failures = []
-    self._run_test()
-
-  def get_test_result(self):
-    """Return the test results.
-
-    Returns:
-      (True, []) if the test passed.
-      (False, list_of_failures) if the test failed.
-      None if the test is still running.
-    """
-
-    test_running = self.runner_process and self.runner_process.poll() is None
-    if test_running:
-      return None
-    # Test is not running, now we have to check if we want to start it again.
-    if self._check_if_test_passed():
-      self.logger.log(SCRIPT_DEBUG, "Test %s passed", self.test_name)
-      return True, []
-    if self.max_test_runs_left == 0:
-      self.logger.log(SCRIPT_DEBUG, "Test %s failed", self.test_name)
-      return False, self.failures
-    self._run_test()
-    return None
-
-  def _check_if_test_passed(self):
-    """Returns True if and only if the test passed."""
-
-    success = False
-    if os.path.isfile(self.results_path):
-      with open(self.results_path, "r") as results:
-        # TODO(vabr): Parse the results to make sure all scenarios succeeded
-        # instead of hard-coding here the number of tests scenarios from
-        # test.py:main.
-        NUMBER_OF_TEST_SCENARIOS = 3
-        passed_scenarios = 0
-        for line in results:
-          self.failures.append(line)
-          passed_scenarios += line.count("successful='True'")
-          success = passed_scenarios == NUMBER_OF_TEST_SCENARIOS
-          if success:
-            break
-
-    self.logger.log(
-        SCRIPT_DEBUG,
-        "Test run of {0} has succeeded: {1}".format(self.test_name, success))
-    return success
-
-  def _run_test(self):
-    """Executes the command to run the test."""
-    with open(self.results_path, "w"):
-      pass  # Just clear the results file.
-    shutil.rmtree(path=self.profile_path, ignore_errors=True)
-    self.max_test_runs_left -= 1
-    self.logger.log(SCRIPT_DEBUG, "Run of test %s started", self.test_name)
-    self.runner_process = subprocess.Popen(self.test_cmd)
+    if config.has_option("run_options", "test_cases_to_run"):
+      self.test_cases_to_run = config.get(
+          "run_options", "test_cases_to_run").split(",")
+    if (config.has_option("logging", "save-only-fails")):
+      self.save_only_fails = config.getboolean("logging", "save-only-fails")


-def _apply_defaults(config, defaults):
+def ProcessRun(config, results):
vabr (Chromium) 2015/04/16 10:48:15 nit: Although "Process" is used as a verb in the n…
melandory 2015/04/16 14:38:28 Done.
148 """Adds default values from |defaults| to |config|. 83 """ Logs |results| of a test run. """
84 logger = logging.getLogger("run_tests")
85 failed_tests = []
86 failed_tests_num = 0
87 for result in results:
88 website, test_case, success, reason = result[0]
89 if not (config.save_only_fails and success):
90 logger.debug("Test case %s has %s on Website %s", test_case,
91 website, {True: "passed", False: "failed"}[success])
vabr (Chromium) 2015/04/16 10:48:16 I like this use of a dictionary! :)
melandory 2015/04/16 14:38:28 I also like the small nice things you can do in Python.
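
The idiom being praised here: indexing a two-entry dict with a bool picks one of two words without an if/else. For example:

    success = False
    print "Test case has %s" % {True: "passed", False: "failed"}[success]
    # -> Test case has failed
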
+      if not success:
+        logger.debug("Reason of failure: %s", reason)

-  Note: This differs from ConfigParser's mechanism for providing defaults in
-  two aspects:
-  * The "defaults" here become explicit, and are associated with sections.
-  * Sections get created for the added defaults where needed, that is, if
-    they do not exist before.
+    if not success:
+      failed_tests.append("%s.%s" % (website, test_case))
+      failed_tests_num += 1

-  Args:
+  logger.info("%d failed tests out of %d, failing tests: %s",
vabr (Chromium) 2015/04/16 10:48:15 nit: Please replace "tests" (both occurrences) with …
melandory 2015/04/16 14:38:28 Done.
-    config: A ConfigParser instance to be updated
-    defaults: A dictionary mapping (section_string, option_string) pairs
+              failed_tests_num, len(results),
+              [name for name in failed_tests])
vabr (Chromium) 2015/04/16 10:48:15 When you are at it, could you also sort the list o…
melandory 2015/04/16 14:38:29 Done.
-      to string values. For every section/option combination not already
-      contained in |config|, the value from |defaults| is stored in |config|.
-  """
-  for (section, option) in defaults:
-    if not config.has_section(section):
-      config.add_section(section)
-    if not config.has_option(section, option):
-      config.set(section, option, defaults[(section, option)])


-def run_tests(config_path):
+def RunTestCaseOnWebsite((website, test_case, config)):
vabr (Chromium) 2015/04/16 10:48:16 This has to take one argument, which is a triple …
melandory 2015/04/16 14:38:28 Exactly.
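
For context: multiprocessing.Pool.map passes exactly one argument to the worker, so related parameters must travel packed in a tuple, and Python 2 still allows unpacking that tuple right in the function signature (a feature removed in Python 3). A minimal sketch of the same pattern (names and data hypothetical):

    def Work((website, test_case)):  # Python 2 tuple-parameter unpacking
      return "%s on %s" % (test_case, website)

    print Work(("example.com", "prompt_test"))  # one argument: a pair
    # -> prompt_test on example.com
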
105 """ Runs a |test_case| on a |website|. If case when |test_case| has
vabr (Chromium) 2015/04/16 10:48:16 typo: If -> In
melandory 2015/04/16 14:38:29 Done.
+  failed it tries to rerun it. If run is too long, then it get stopped.
vabr (Chromium) 2015/04/16 10:48:16 nits: is too long -> takes too long; get stopped -> gets stopped
melandory 2015/04/16 14:38:28 Done.
107 """
108
109 profile_path = tempfile.mkdtemp()
110 # The tests can be flaky. This is why we try to rerun up to 3 times.
111 attempts = 3
112 result = ["", "", False, ""]
vabr (Chromium) 2015/04/16 10:48:15 Should this be a quadruple (,,,) instead of a list …
melandory 2015/04/16 14:38:29 yep.
+  logger = logging.getLogger("run_tests")
+  for _ in xrange(attempts):
+    shutil.rmtree(path=profile_path, ignore_errors=True)
+    logger.log(SCRIPT_DEBUG, "Run of test case %s of website %s started",
+               test_case, website)
+    try:
+      with stopit.ThreadingTimeout(100) as timeout_status:
vabr (Chromium) 2015/04/16 10:48:16 Just to confirm that I understand how with with th…
vabr (Chromium) 2015/04/16 10:48:16 nit: What about calling timeout_status just timeout…
melandory 2015/04/16 14:38:28 Done.
melandory 2015/04/16 14:38:28 Yep.
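
For reference, this is what replaces the old TODO about swapping the external timeout command for something callable from inside Python. The third-party stopit package's ThreadingTimeout raises inside the block when the budget is exhausted and records what happened in its state attribute, which the code below compares against EXECUTED. A minimal sketch (assumes stopit is installed):

    import stopit

    with stopit.ThreadingTimeout(2) as timeout_ctx:  # 2-second budget
      while True:  # stands in for a long-running test
        pass

    if timeout_ctx.state != timeout_ctx.EXECUTED:
      print "timed out"  # state is timeout_ctx.TIMED_OUT here
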
+        logger.log(SCRIPT_DEBUG,
+                   "Run test with parameters: %s %s %s %s %s %s",
+                   config.chrome_path, config.chromedriver_path,
+                   profile_path, config.passwords_path,
+                   website, test_case)
+        result = tests.RunTest(config.chrome_path, config.chromedriver_path,
+                               profile_path, config.passwords_path,
+                               website, test_case)
+      if timeout_status.state != timeout_status.EXECUTED:
+        result = [[website, test_case, False, "Timeout"]]
vabr (Chromium) 2015/04/16 10:48:15 Also, why is result now a list of results (cf. line …)
vabr (Chromium) 2015/04/16 10:48:16 Again, are results quadruples, or lists of length …
melandory 2015/04/16 14:38:28 Done.
melandory 2015/04/16 14:38:29 Nope, no good reason. You're completely correct th…
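
As the exchange above settles, the natural shape for a single run's outcome is a plain 4-tuple rather than a one-element list of lists, e.g. (hypothetical values):

    result = ("example.com", "prompt_test", False, "Timeout")
    website, test_case, success, reason = result  # no result[0] indirection
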
+      _, _, success, _ = result[0]
+      if success:
+        return result
+    except Exception as e:
+      result = [[website, test_case, False, e]]
+  return result
+
+
+def RunTests(config_path):
170 """Runs automated tests. 139 """Runs automated tests.
171 140
172 Runs the tests and returns the results through logging: 141 Runs the tests and returns the results through logging:
173 On logging.INFO logging level, it returns the summary of how many tests 142 On logging.INFO logging level, it returns the summary of how many tests
174 passed and failed. 143 passed and failed.
175 On logging.DEBUG logging level, it returns the failure logs, if any. 144 On logging.DEBUG logging level, it returns the failure logs, if any.
176 (On SCRIPT_DEBUG it returns diagnostics for this script.) 145 (On SCRIPT_DEBUG it returns diagnostics for this script.)
177 146
178 Args: 147 Args:
179 config_path: The path to the config INI file. See the top of the file 148 config_path: The path to the config INI file. See the top of the file
180 for format description. 149 for format description.
181 """ 150 """
-  def has_test_run_finished(runner, result):
-    result = runner.get_test_result()
-    if result:  # This test run is finished.
-      status, log = result
-      results.append((runner.test_name, status, log))
-      return True
-    else:
-      return False
-
-  defaults = {("run_options", "tests_in_parallel"): "1"}
-  config = ConfigParser.ConfigParser()
-  _apply_defaults(config, defaults)
-  config.read(config_path)
-  max_tests_in_parallel = config.getint("run_options", "tests_in_parallel")
-  full_path = os.path.realpath(__file__)
-  tests_dir = os.path.dirname(full_path)
-  tests_path = os.path.join(tests_dir, "tests.py")
-  test_name_idx = 2  # Index of "test_name_placeholder" below.
-  general_test_cmd = ["python", tests_path, "test_name_placeholder",
-                      "--chrome-path", config.get("binaries", "chrome-path"),
-                      "--chromedriver-path",
-                      config.get("binaries", "chromedriver-path"),
-                      "--passwords-path",
-                      config.get("data_files", "passwords_path")]
-  runners = []
-  if config.has_option("run_options", "tests_to_run"):
-    tests_to_run = config.get("run_options", "tests_to_run").split(",")
-  else:
-    tests_to_run = tests.all_tests.keys()
-
-  if config.has_option("run_options", "test_cases_to_run"):
-    general_test_cmd += ["--test-cases-to-run",
-        config.get("run_options", "test_cases_to_run").replace(",", " ")]
-
-  if (config.has_option("logging", "save-only-fails") and
-      config.getboolean("logging", "save-only-fails")):
-    general_test_cmd.append("--save-only-fails")
-
+  config = Config(config_path)
   logger = logging.getLogger("run_tests")
-  logger.log(SCRIPT_DEBUG, "%d tests to run: %s", len(tests_to_run),
-             tests_to_run)
-  results = []  # List of (name, bool_passed, failure_log).
-  while len(runners) + len(tests_to_run) > 0:
-    runners = [runner for runner in runners if not has_test_run_finished(
-        runner, results)]
-    while len(runners) < max_tests_in_parallel and len(tests_to_run):
-      test_name = tests_to_run.pop()
-      specific_test_cmd = list(general_test_cmd)
-      specific_test_cmd[test_name_idx] = test_name
-      runners.append(TestRunner(specific_test_cmd, test_name))
-    time.sleep(1)
-  failed_tests = [(name, log) for (name, passed, log) in results if not passed]
-  logger.info("%d failed tests out of %d, failing tests: %s",
-              len(failed_tests), len(results),
-              [name for (name, _) in failed_tests])
-  logger.debug("Logs of failing tests: %s", failed_tests)
+  logger.log(SCRIPT_DEBUG, "%d tests to run: %s", len(config.tests_to_run),
+             config.tests_to_run)
+  data = [(website, test_case, config)
+          for website in config.tests_to_run
+          for test_case in config.test_cases_to_run]
+  number_of_processes = min([config.max_tests_in_parallel,
+                             len(config.test_cases_to_run) *
+                             len(config.tests_to_run)])
+  p = multiprocessing.Pool(number_of_processes)
+  results = p.map(RunTestCaseOnWebsite, data)
+  p.close()
+  p.join()
+  ProcessRun(config, results)


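The rewritten RunTests reduces to a standard fan-out: build the cross product of websites and test cases, cap the pool size at the number of jobs, map, then close and join. A self-contained sketch of that pattern (websites, test cases, and the worker are hypothetical stand-ins):

    import multiprocessing

    def RunOne((website, test_case)):
      # Stand-in for RunTestCaseOnWebsite; pretends every run passes.
      return (website, test_case, True, "")

    if __name__ == "__main__":
      data = [(w, t) for w in ["a.com", "b.com"] for t in ["save", "fill"]]
      pool = multiprocessing.Pool(min(4, len(data)))
      results = pool.map(RunOne, data)
      pool.close()  # no more tasks will be submitted
      pool.join()   # wait for worker processes to exit
      print results
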
 def main():
   parser = argparse.ArgumentParser()
   parser.add_argument("config_path", metavar="N",
                       help="Path to the config.ini file.")
   args = parser.parse_args()
-  run_tests(args.config_path)
+  RunTests(args.config_path)


 if __name__ == "__main__":
   main()
