OLD | NEW |
1 # -*- coding: utf-8 -*- | 1 # -*- coding: utf-8 -*- |
2 # Copyright 2014 The Chromium Authors. All rights reserved. | 2 # Copyright 2014 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Encapsulates running tests defined in tests.py. | 6 """Encapsulates running tests defined in tests.py. |
7 | 7 |
8 Running this script requires passing --config-path with a path to a config file | 8 Running this script requires passing --config-path with a path to a config file |
9 of the following structure: | 9 of the following structure: |
10 | 10 |
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
60 self.profile_path = tempfile.mkdtemp() | 60 self.profile_path = tempfile.mkdtemp() |
61 results = tempfile.NamedTemporaryFile(delete=False) | 61 results = tempfile.NamedTemporaryFile(delete=False) |
62 self.results_path = results.name | 62 self.results_path = results.name |
63 results.close() | 63 results.close() |
64 self.test_cmd = test_cmd + ["--profile-path", self.profile_path, | 64 self.test_cmd = test_cmd + ["--profile-path", self.profile_path, |
65 "--save-path", self.results_path] | 65 "--save-path", self.results_path] |
66 self.test_name = test_name | 66 self.test_name = test_name |
67 # TODO(vabr): Ideally we would replace timeout with something allowing | 67 # TODO(vabr): Ideally we would replace timeout with something allowing |
68 # calling tests directly inside Python, and working on other platforms. | 68 # calling tests directly inside Python, and working on other platforms. |
69 # | 69 # |
70 # The website test runs in two passes, each pass has an internal | 70 # The website test runs multiple scenarios, each one has an internal |
71 # timeout of 200s for waiting (see |remaining_time_to_wait| and | 71 # timeout of 200s for waiting (see |remaining_time_to_wait| and |
72 # Wait() in websitetest.py). Accounting for some more time spent on | 72 # Wait() in websitetest.py). Since not every scenario is expected |
73 # the non-waiting execution, 300 seconds should be the upper bound on | 73 # to take the full 200s, the maximum time allocated for all of them is 300s. |
74 # the runtime of one pass, thus 600 seconds for the whole test. | 74 self.test_cmd = ["timeout", "300"] + self.test_cmd |
75 self.test_cmd = ["timeout", "600"] + self.test_cmd | |
76 | 75 |
77 self.logger.log(SCRIPT_DEBUG, | 76 self.logger.log(SCRIPT_DEBUG, |
78 "TestRunner set up for test %s, command '%s', " | 77 "TestRunner set up for test %s, command '%s', " |
79 "profile path %s, results file %s", | 78 "profile path %s, results file %s", |
80 self.test_name, self.test_cmd, self.profile_path, | 79 self.test_name, self.test_cmd, self.profile_path, |
81 self.results_path) | 80 self.results_path) |
82 | 81 |
83 self.runner_process = None | 82 self.runner_process = None |
84 # The tests can be flaky. This is why we try to rerun up to 3 times. | 83 # The tests can be flaky. This is why we try to rerun up to 3 times. |
85 self.max_test_runs_left = 3 | 84 self.max_test_runs_left = 3 |
(...skipping 17 matching lines...) Expand all Loading... |
103 self.logger.log(SCRIPT_DEBUG, "Test %s passed", self.test_name) | 102 self.logger.log(SCRIPT_DEBUG, "Test %s passed", self.test_name) |
104 return True, [] | 103 return True, [] |
105 if self.max_test_runs_left == 0: | 104 if self.max_test_runs_left == 0: |
106 self.logger.log(SCRIPT_DEBUG, "Test %s failed", self.test_name) | 105 self.logger.log(SCRIPT_DEBUG, "Test %s failed", self.test_name) |
107 return False, self.failures | 106 return False, self.failures |
108 self._run_test() | 107 self._run_test() |
109 return None | 108 return None |
110 | 109 |
111 def _check_if_test_passed(self): | 110 def _check_if_test_passed(self): |
112 """Returns True if and only if the test passed.""" | 111 """Returns True if and only if the test passed.""" |
| 112 |
| 113 success = False |
113 if os.path.isfile(self.results_path): | 114 if os.path.isfile(self.results_path): |
114 with open(self.results_path, "r") as results: | 115 with open(self.results_path, "r") as results: |
115 count = 0 # Count the number of successful tests. | 116 # TODO(vabr): Parse the results to make sure all scenarios succeeded |
| 117 # instead of hard-coding here the number of test scenarios from |
| 118 # tests.py:main. |
| 119 NUMBER_OF_TEST_SCENARIOS = 3 |
| 120 passed_scenarios = 0 |
116 for line in results: | 121 for line in results: |
117 self.failures.append(line) | 122 self.failures.append(line) |
118 count += line.count("successful='True'") | 123 passed_scenarios += line.count("successful='True'") |
| 124 success = passed_scenarios == NUMBER_OF_TEST_SCENARIOS |
| 125 if success: |
| 126 break |
119 | 127 |
120 # There is only two tests running for every website: the prompt and | 128 self.logger.log( |
121 # the normal test. If both of the tests were successful, the tests | 129 SCRIPT_DEBUG, |
122 # would be stopped for the current website. | 130 "Test run of {0} succeeded: {1}".format(self.test_name, success)) |
123 self.logger.log(SCRIPT_DEBUG, "Test run of %s: %s", | 131 return success |
124 self.test_name, "pass" if count == 2 else "fail") | |
125 if count == 2: | |
126 return True | |
127 return False | |
128 | 132 |
129 def _run_test(self): | 133 def _run_test(self): |
130 """Executes the command to run the test.""" | 134 """Executes the command to run the test.""" |
131 with open(self.results_path, "w"): | 135 with open(self.results_path, "w"): |
132 pass # Just clear the results file. | 136 pass # Just clear the results file. |
133 shutil.rmtree(path=self.profile_path, ignore_errors=True) | 137 shutil.rmtree(path=self.profile_path, ignore_errors=True) |
134 self.max_test_runs_left -= 1 | 138 self.max_test_runs_left -= 1 |
135 self.logger.log(SCRIPT_DEBUG, "Run of test %s started", self.test_name) | 139 self.logger.log(SCRIPT_DEBUG, "Run of test %s started", self.test_name) |
136 self.runner_process = subprocess.Popen(self.test_cmd) | 140 self.runner_process = subprocess.Popen(self.test_cmd) |
137 | 141 |
(...skipping 87 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
225 def main(): | 229 def main(): |
226 parser = argparse.ArgumentParser() | 230 parser = argparse.ArgumentParser() |
227 parser.add_argument("config_path", metavar="N", | 231 parser.add_argument("config_path", metavar="N", |
228 help="Path to the config.ini file.") | 232 help="Path to the config.ini file.") |
229 args = parser.parse_args() | 233 args = parser.parse_args() |
230 run_tests(args.config_path) | 234 run_tests(args.config_path) |
231 | 235 |
232 | 236 |
233 if __name__ == "__main__": | 237 if __name__ == "__main__": |
234 main() | 238 main() |
OLD | NEW |