Chromium Code Reviews

Side by Side Diff: components/test/data/password_manager/automated_tests/run_tests.py

Issue 1011363003: [Password manager Python tests] Switch to Python logging (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Added TODO(melandory) | Created 5 years, 9 months ago
Unified diff (lines starting with "-" exist only in the old version, "+" only in the new; space-prefixed lines are unchanged context):

 # -*- coding: utf-8 -*-
 # Copyright 2014 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-"""This file allows the bots to be easily configured and run the tests.
+"""Encapsulates running tests defined in tests.py.
 
 Running this script requires passing --config-path with a path to a config file
 of the following structure:
 
 [data_files]
 passwords_path=<path to a file with passwords>
 [binaries]
 chrome-path=<chrome binary path>
 chromedriver-path=<chrome driver path>
 [run_options]
-# |write_to_sheet| is optional, the default value is false.
-write_to_sheet=[false|true]
 # |tests_in_parallel| is optional, the default value is 1.
 tests_in_parallel=<number of parallel tests>
 # |tests_to_runs| field is optional, if it is absent all tests will be run.
 tests_to_run=<test names to run, comma delimited>
-[output]
-# |save-path| is optional, the default value is /dev/null.
-save-path=<file where to save result>
-[sheet_info]
-# This section is required only when write_to_sheet=true
-pkey=full_path
-client_email=email_assigned_by_google_dev_console
-sheet_key=sheet_key_from_sheet_url
 """
-from datetime import datetime
+import argparse
 import ConfigParser
-import sys
-import httplib2
+import logging
 import os
 import shutil
 import subprocess
 import tempfile
 import time
-sheet_libraries_import_error = None
-try:
-  # TODO(vabr) Remove this dependency http://crbug.com/418485#c4.
-  from Sheet import Sheet
-  from apiclient.discovery import build
-  from gdata.gauth import OAuth2TokenFromCredentials
-  from gdata.spreadsheet.service import SpreadsheetsService
-  from oauth2client.client import SignedJwtAssertionCredentials
-  import oauth2client.tools
-except ImportError as err:
-  sheet_libraries_import_error = err
-
 
 from environment import Environment
 import tests
 
-_CREDENTIAL_SCOPES = "https://spreadsheets.google.com/feeds"
 
-# TODO(dvadym) Change all prints in this file to correspond logging.
+# Just below logging.DEBUG, use for this script's debug messages instead
+# of logging.DEBUG, which is already used for detailed test debug messages.
+SCRIPT_DEBUG = 9
 
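A side note on the new SCRIPT_DEBUG constant: logging levels are plain integers, so 9 sits just below logging.DEBUG (10), and level-9 records are dropped unless the effective logger level is 9 or lower. A minimal sketch of the behavior (the addLevelName call is optional and only affects how records are rendered):

import logging

SCRIPT_DEBUG = 9  # Custom level just below logging.DEBUG (10).
logging.addLevelName(SCRIPT_DEBUG, "SCRIPT_DEBUG")  # Optional, for display.
logging.basicConfig(level=SCRIPT_DEBUG)  # Let level-9 records through.

logger = logging.getLogger("run_tests")
logger.log(SCRIPT_DEBUG, "shown: effective level is %d", SCRIPT_DEBUG)
logger.setLevel(logging.DEBUG)
logger.log(SCRIPT_DEBUG, "now filtered out")  # Below DEBUG, not shown.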
-# TODO(dvadym) Consider to move this class to separate file.
-class SheetWriter(object):
-
-  def __init__(self, config):
-    self.write_to_sheet = config.getboolean("run_options", "write_to_sheet")
-    if not self.write_to_sheet:
-      return
-    if sheet_libraries_import_error:
-      raise sheet_libraries_import_error
-    self.pkey = config.get("sheet_info", "pkey")
-    self.client_email = config.get("sheet_info", "client_email")
-    self.sheet_key = config.get("sheet_info", "sheet_key")
-    _, self.access_token = self._authenticate()
-    self.sheet = self._spredsheeet_for_logging()
-
-  # TODO(melandory): Function _authenticate belongs to separate module.
-  def _authenticate(self):
-    http, token = None, None
-    with open(self.pkey) as pkey_file:
-      private_key = pkey_file.read()
-      credentials = SignedJwtAssertionCredentials(
-          self.client_email, private_key, _CREDENTIAL_SCOPES)
-      http = httplib2.Http()
-      http = credentials.authorize(http)
-      build("drive", "v2", http=http)
-      token = OAuth2TokenFromCredentials(credentials).access_token
-    return http, token
-
-  # TODO(melandory): Functionality of _spredsheeet_for_logging belongs
-  # to websitetests, because this way we do not need to write results of run
-  # in separate file and then read it here.
-  def _spredsheeet_for_logging(self):
-    """ Connects to document where result of test run will be logged. """
-    # Connect to trix
-    service = SpreadsheetsService(additional_headers={
-        "Authorization": "Bearer " + self.access_token})
-    sheet = Sheet(service, self.sheet_key)
-    return sheet
-
-  def write_line_to_sheet(self, data):
-    if not self.write_to_sheet:
-      return
-    try:
-      self.sheet.InsertRow(self.sheet.row_count, data)
-    except Exception:
-      pass  # TODO(melandory): Sometimes writing to spreadsheet fails. We need
-            # to deal with it better that just ignoring it.
-
 
 class TestRunner(object):
+  """Runs tests for a single website."""
 
-  def __init__(self, general_test_cmd, test_name):
-    """ Args:
-    general_test_cmd: String contains part of run command common for all tests,
-    [2] is placeholder for test name.
-    test_name: Test name (facebook for example).
+  def __init__(self, test_cmd, test_name):
+    """Initialize the TestRunner.
+
+    Args:
+      test_cmd: List of command line arguments to be supplied to
+        every test run.
+      test_name: Test name (e.g., facebook).
     """
+    self.logger = logging.getLogger("run_tests")
+
     self.profile_path = tempfile.mkdtemp()
     results = tempfile.NamedTemporaryFile(delete=False)
     self.results_path = results.name
     results.close()
-    self.test_cmd = general_test_cmd + ["--profile-path", self.profile_path,
-                                        "--save-path", self.results_path]
-    self.test_cmd[2] = self.test_name = test_name
-    # TODO(rchtara): Using "timeout is just temporary until a better,
-    # platform-independent solution is found.
+    self.test_cmd = test_cmd + ["--profile-path", self.profile_path,
+                                "--save-path", self.results_path]
+    self.test_name = test_name
+    # TODO(vabr): Ideally we would replace timeout with something allowing
+    # calling tests directly inside Python, and working on other platforms.
+    #
     # The website test runs in two passes, each pass has an internal
     # timeout of 200s for waiting (see |remaining_time_to_wait| and
     # Wait() in websitetest.py). Accounting for some more time spent on
     # the non-waiting execution, 300 seconds should be the upper bound on
     # the runtime of one pass, thus 600 seconds for the whole test.
     self.test_cmd = ["timeout", "600"] + self.test_cmd
+
+    self.logger.log(SCRIPT_DEBUG,
+                    "TestRunner set up for test %s, command '%s', "
+                    "profile path %s, results file %s",
+                    self.test_name, self.test_cmd, self.profile_path,
+                    self.results_path)
+
     self.runner_process = None
     # The tests can be flaky. This is why we try to rerun up to 3 times.
     self.max_test_runs_left = 3
     self.failures = []
     self._run_test()
 
   def get_test_result(self):
-    """ Return None if result is not ready yet."""
+    """Return the test results.
+
+    Returns:
+      (True, []) if the test passed.
+      (False, list_of_failures) if the test failed.
+      None if the test is still running.
+    """
+
     test_running = self.runner_process and self.runner_process.poll() is None
-    if test_running: return None
+    if test_running:
+      return None
     # Test is not running, now we have to check if we want to start it again.
     if self._check_if_test_passed():
-      print "Test " + self.test_name + " passed"
-      return "pass", []
+      self.logger.log(SCRIPT_DEBUG, "Test %s passed", self.test_name)
+      return True, []
     if self.max_test_runs_left == 0:
-      print "Test " + self.test_name + " failed"
-      return "fail", self.failures
+      self.logger.log(SCRIPT_DEBUG, "Test %s failed", self.test_name)
+      return False, self.failures
     self._run_test()
     return None
 
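The new (True/False/None) contract of get_test_result lends itself to a polling loop; a minimal usage sketch (the command and test name below are illustrative, not from this CL):

runner = TestRunner(["python", "tests.py", "facebook"], "facebook")
result = None
while result is None:  # None while running, or after a flaky-retry restart.
  time.sleep(1)
  result = runner.get_test_result()
passed, failure_log = result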
   def _check_if_test_passed(self):
+    """Returns True if and only if the test passed."""
     if os.path.isfile(self.results_path):
-      results = open(self.results_path, "r")
-      count = 0  # Count the number of successful tests.
-      for line in results:
-        # TODO(melandory): We do not need to send all this data to sheet.
-        self.failures.append(line)
-        count += line.count("successful='True'")
-      results.close()
+      with open(self.results_path, "r") as results:
+        count = 0  # Count the number of successful tests.
+        for line in results:
+          self.failures.append(line)
+          count += line.count("successful='True'")
+
       # There is only two tests running for every website: the prompt and
       # the normal test. If both of the tests were successful, the tests
       # would be stopped for the current website.
-      print "Test run of %s %s" % (self.test_name, "passed"
-                                   if count == 2 else "failed")
+      self.logger.log(SCRIPT_DEBUG, "Test run of %s: %s",
+                      self.test_name, "pass" if count == 2 else "fail")
       if count == 2:
         return True
     return False
 
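For reference, the count == 2 check relies on the results file containing one "successful='True'" marker per passing run (prompt and normal). This CL does not show the exact line format websitetest.py emits, so the two-line example below is only an assumption about its shape:

<result test='facebook' pass='prompt' successful='True'/>
<result test='facebook' pass='normal' successful='True'/>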
   def _run_test(self):
-    """Run separate process that once run test for one site."""
-    try:
-      os.remove(self.results_path)
-    except Exception:
-      pass
-    try:
-      shutil.rmtree(self.profile_path)
-    except Exception:
-      pass
+    """Executes the command to run the test."""
+    with open(self.results_path, "w"):
+      pass  # Just clear the results file.
+    shutil.rmtree(path=self.profile_path, ignore_errors=True)
     self.max_test_runs_left -= 1
-    print "Run of test %s started" % self.test_name
+    self.logger.log(SCRIPT_DEBUG, "Run of test %s started", self.test_name)
     self.runner_process = subprocess.Popen(self.test_cmd)
 
+
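On the TODO about replacing the coreutils timeout wrapper: one platform-independent alternative would be to watchdog the child process from Python itself. A rough sketch, assuming an immediate kill with no grace period is acceptable (the helper name is made up):

import subprocess
import threading

def popen_with_timeout(cmd, timeout_seconds):
  """Starts cmd and kills it if it outlives timeout_seconds."""
  process = subprocess.Popen(cmd)
  def _kill_if_still_running():
    if process.poll() is None:  # Still running after the deadline.
      process.kill()
  timer = threading.Timer(timeout_seconds, _kill_if_still_running)
  timer.daemon = True  # Do not keep the interpreter alive for the timer.
  timer.start()
  return process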
 def _apply_defaults(config, defaults):
   """Adds default values from |defaults| to |config|.
 
   Note: This differs from ConfigParser's mechanism for providing defaults in
   two aspects:
   * The "defaults" here become explicit, and are associated with sections.
   * Sections get created for the added defaults where needed, that is, if
     they do not exist before.
 
   Args:
     config: A ConfigParser instance to be updated
     defaults: A dictionary mapping (section_string, option_string) pairs
       to string values. For every section/option combination not already
       contained in |config|, the value from |defaults| is stored in |config|.
   """
   for (section, option) in defaults:
     if not config.has_section(section):
       config.add_section(section)
     if not config.has_option(section, option):
       config.set(section, option, defaults[(section, option)])
 
+
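To make the semantics concrete, this mirrors how run_tests uses the helper: defaults are applied first, and values from the file read afterwards override them.

config = ConfigParser.ConfigParser()
_apply_defaults(config, {("run_options", "tests_in_parallel"): "1"})
config.read(config_path)  # config_path: path to the INI file; file values win.
print config.getint("run_options", "tests_in_parallel")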
 def run_tests(config_path):
-  """ Runs automated tests. """
+  """Runs automated tests.
+
+  Runs the tests and returns the results through logging:
+  On logging.INFO logging level, it returns the summary of how many tests
+  passed and failed.
+  On logging.DEBUG logging level, it returns the failure logs, if any.
+  (On SCRIPT_DEBUG it returns diagnostics for this script.)
+
+  Args:
+    config_path: The path to the config INI file. See the top of the file
+      for format description.
+  """
+
   environment = Environment("", "", "", None, False)
-  defaults = { ("output", "save-path"): "/dev/null",
-               ("run_options", "tests_in_parallel"): "1",
-               ("run_options", "write_to_sheet"): "false" }
+  defaults = {("run_options", "tests_in_parallel"): "1"}
   config = ConfigParser.ConfigParser()
   _apply_defaults(config, defaults)
   config.read(config_path)
-  date = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
   max_tests_in_parallel = config.getint("run_options", "tests_in_parallel")
-  sheet_writer = SheetWriter(config)
   full_path = os.path.realpath(__file__)
   tests_dir = os.path.dirname(full_path)
   tests_path = os.path.join(tests_dir, "tests.py")
-  general_test_cmd = ["python", tests_path, "test_name_placeholder",
-      "--chrome-path", config.get("binaries", "chrome-path"),
-      "--chromedriver-path", config.get("binaries", "chromedriver-path"),
-      "--passwords-path", config.get("data_files", "passwords_path")]
+  test_name_idx = 2  # Index of "test_name_placeholder" below.
+  general_test_cmd = ["python", tests_path, "test_name_placeholder",
+                      "--chrome-path", config.get("binaries", "chrome-path"),
+                      "--chromedriver-path",
+                      config.get("binaries", "chromedriver-path"),
+                      "--passwords-path",
+                      config.get("data_files", "passwords_path")]
   runners = []
   if config.has_option("run_options", "tests_to_run"):
-    user_selected_tests = config.get("run_options", "tests_to_run").split(',')
+    user_selected_tests = config.get("run_options", "tests_to_run").split(",")
     tests_to_run = user_selected_tests
   else:
     tests.Tests(environment)
     tests_to_run = [test.name for test in environment.websitetests]
 
-  with open(config.get("output", "save-path"), 'w') as savefile:
-    print "Tests to run %d\nTests: %s" % (len(tests_to_run), tests_to_run)
-    while len(runners) + len(tests_to_run) > 0:
-      i = 0
-      while i < len(runners):
-        result = runners[i].get_test_result()
-        if result:  # This test run is finished.
-          status, log = result
-          testinfo = [runners[i].test_name, status, date, " | ".join(log)]
-          sheet_writer.write_line_to_sheet(testinfo)
-          print>>savefile, " ".join(testinfo)
-          del runners[i]
-        else:
-          i += 1
-      while len(runners) < max_tests_in_parallel and len(tests_to_run) > 0:
-        runners.append(TestRunner(general_test_cmd, tests_to_run.pop()))
-      time.sleep(1)  # Let us wait for worker process to finish.
+  logger = logging.getLogger("run_tests")
+  logger.log(SCRIPT_DEBUG, "%d tests to run: %s", len(tests_to_run),
+             tests_to_run)
+  results = []  # List of (name, bool_passed, failure_log).
+  while len(runners) + len(tests_to_run) > 0:
+    i = 0
+    # TODO(melandory): Rewrite with list comprehension to increase readability.
+    while i < len(runners):
+      result = runners[i].get_test_result()
+      if result:  # This test run is finished.
+        status, log = result
+        results.append((runners[i].test_name, status, log))
+        del runners[i]
+      else:
+        i += 1
+    while len(runners) < max_tests_in_parallel and len(tests_to_run):
+      test_name = tests_to_run.pop()
+      specific_test_cmd = list(general_test_cmd)
+      specific_test_cmd[test_name_idx] = test_name
+      runners.append(TestRunner(specific_test_cmd, test_name))
+    time.sleep(1)
+  failed_tests = [(name, log) for (name, passed, log) in results if not passed]
+  logger.info("%d failed tests out of %d", len(failed_tests), len(results))
+  logger.info("Failing tests: %s", [name for (name, _) in failed_tests])
+  logger.debug("Logs of failing tests: %s", failed_tests)
+
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument("config_path", metavar="N",
+                      help="Path to the config.ini file.")
+  args = parser.parse_args()
+  run_tests(args.config_path)
+
 
 if __name__ == "__main__":
-  if len(sys.argv) != 2:
-    print "Synopsis:\n python run_tests.py <config_path>"
-  config_path = sys.argv[1]
-  run_tests(config_path)
+  main()
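With the argparse entry point, the script is invoked as "python run_tests.py <path/to/config.ini>". Note that neither run_tests nor main installs a logging handler, so to actually see the INFO summary or the SCRIPT_DEBUG diagnostics something must configure one; a minimal assumed setup (not shown in this CL) would be:

logging.basicConfig(level=SCRIPT_DEBUG,
                    format="%(levelname)s:%(name)s: %(message)s")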