OLD | NEW |
1 # -*- coding: utf-8 -*- | 1 # -*- coding: utf-8 -*- |
2 # Copyright 2014 The Chromium Authors. All rights reserved. | 2 # Copyright 2014 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Encapsulates running tests defined in tests.py. | 6 """Encapsulates running tests defined in tests.py. |
7 | 7 |
8 Running this script requires passing --config-path with a path to a config file | 8 Running this script requires passing --config-path with a path to a config file |
9 of the following structure: | 9 of the following structure: |
10 | 10 |
11 [data_files] | 11 [data_files] |
12 passwords_path=<path to a file with passwords> | 12 passwords_path=<path to a file with passwords> |
13 [binaries] | 13 [binaries] |
14 chrome-path=<chrome binary path> | 14 chrome-path=<chrome binary path> |
15 chromedriver-path=<chrome driver path> | 15 chromedriver-path=<chrome driver path> |
16 [run_options] | 16 [run_options] |
17 # |tests_in_parallel| is optional, the default value is 1. | 17 # |tests_in_parallel| is optional, the default value is 1. |
18 tests_in_parallel=<number of parallel tests> | 18 tests_in_parallel=<number of parallel tests> |
19 # |tests_to_run| field is optional, if it is absent all tests will be run. | 19 # |tests_to_run| field is optional, if it is absent all tests will be run. |
20 tests_to_run=<test names to run, comma delimited> | 20 tests_to_run=<test names to run, comma delimited> |
| 21 # |test_cases_to_run| field is optional, if it is absent all test cases |
| 22 # will be run. |
| 23 test_cases_to_run=<test case names to run, comma delimited> |
21 [logging] | 24 [logging] |
22 # |save-only-failures| is optional, the default is false. | 25 # |save-only-failures| is optional, the default is false. |
23 save-only-failures=<Boolean parameter which enforces saving results of only | 26 save-only-failures=<Boolean parameter which enforces saving results of only |
24 failed tests> | 27 failed tests> |
25 | 28 |
26 The script uses Python's logging library to report the test results, | 29 The script uses Python's logging library to report the test results, |
27 as well as debugging information. It emits three levels of logs (in | 30 as well as debugging information. It emits three levels of logs (in |
28 descending order of severity): | 31 descending order of severity): |
29 logging.INFO: Summary of the tests. | 32 logging.INFO: Summary of the tests. |
30 logging.DEBUG: Details about tests failures. | 33 logging.DEBUG: Details about tests failures. |
(...skipping 175 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
206 config.get("data_files", "passwords_path")] | 209 config.get("data_files", "passwords_path")] |
207 runners = [] | 210 runners = [] |
208 if config.has_option("run_options", "tests_to_run"): | 211 if config.has_option("run_options", "tests_to_run"): |
209 tests_to_run = config.get("run_options", "tests_to_run").split(",") | 212 tests_to_run = config.get("run_options", "tests_to_run").split(",") |
210 else: | 213 else: |
211 tests_to_run = tests.all_tests.keys() | 214 tests_to_run = tests.all_tests.keys() |
212 if (config.has_option("logging", "save-only-failures") and | 215 if (config.has_option("logging", "save-only-failures") and |
213 config.getboolean("logging", "save-only-failures")): | 216 config.getboolean("logging", "save-only-failures")): |
214 general_test_cmd.append("--save-only-failures") | 217 general_test_cmd.append("--save-only-failures") |
215 | 218 |
| 219 if config.has_option("run_options", "test_cases_to_run"): |
| 220 general_test_cmd += ["--test-cases-to-run", |
| 221 config.get("run_options", "test_cases_to_run").replace(",", " ")] |
| 222 |
216 logger = logging.getLogger("run_tests") | 223 logger = logging.getLogger("run_tests") |
217 logger.log(SCRIPT_DEBUG, "%d tests to run: %s", len(tests_to_run), | 224 logger.log(SCRIPT_DEBUG, "%d tests to run: %s", len(tests_to_run), |
218 tests_to_run) | 225 tests_to_run) |
219 results = [] # List of (name, bool_passed, failure_log). | 226 results = [] # List of (name, bool_passed, failure_log). |
220 while len(runners) + len(tests_to_run) > 0: | 227 while len(runners) + len(tests_to_run) > 0: |
221 runners = [runner for runner in runners if not has_test_run_finished( | 228 runners = [runner for runner in runners if not has_test_run_finished( |
222 runner, results)] | 229 runner, results)] |
223 while len(runners) < max_tests_in_parallel and len(tests_to_run): | 230 while len(runners) < max_tests_in_parallel and len(tests_to_run): |
224 test_name = tests_to_run.pop() | 231 test_name = tests_to_run.pop() |
225 specific_test_cmd = list(general_test_cmd) | 232 specific_test_cmd = list(general_test_cmd) |
(...skipping 10 matching lines...) Expand all Loading... |
236 def main(): | 243 def main(): |
237 parser = argparse.ArgumentParser() | 244 parser = argparse.ArgumentParser() |
238 parser.add_argument("config_path", metavar="N", | 245 parser.add_argument("config_path", metavar="N", |
239 help="Path to the config.ini file.") | 246 help="Path to the config.ini file.") |
240 args = parser.parse_args() | 247 args = parser.parse_args() |
241 run_tests(args.config_path) | 248 run_tests(args.config_path) |
242 | 249 |
243 | 250 |
244 if __name__ == "__main__": | 251 if __name__ == "__main__": |
245 main() | 252 main() |
OLD | NEW |