OLD | NEW |
(Empty) | |
| 1 # Copyright 2011 Google Inc. |
| 2 # |
| 3 # Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 # you may not use this file except in compliance with the License. |
| 5 # You may obtain a copy of the License at |
| 6 # |
| 7 # http://www.apache.org/licenses/LICENSE-2.0 |
| 8 # |
| 9 # Unless required by applicable law or agreed to in writing, software |
| 10 # distributed under the License is distributed on an "AS IS" BASIS, |
| 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 # See the License for the specific language governing permissions and |
| 13 # limitations under the License. |
| 14 |
| 15 import subprocess |
| 16 import unittest |
| 17 import os |
| 18 import sys |
| 19 import re |
| 20 import time |
| 21 import getpass |
| 22 import platform |
| 23 |
| 24 from gslib.command import Command |
| 25 from gslib.command import COMMAND_NAME |
| 26 from gslib.command import COMMAND_NAME_ALIASES |
| 27 from gslib.command import CONFIG_REQUIRED |
| 28 from gslib.command import FILE_URIS_OK |
| 29 from gslib.command import MAX_ARGS |
| 30 from gslib.command import MIN_ARGS |
| 31 from gslib.command import PROVIDER_URIS_OK |
| 32 from gslib.command import SUPPORTED_SUB_ARGS |
| 33 from gslib.command import URIS_START_ARG |
| 34 from gslib.exception import CommandException |
| 35 from gslib.help_provider import HELP_NAME |
| 36 from gslib.help_provider import HELP_NAME_ALIASES |
| 37 from gslib.help_provider import HELP_ONE_LINE_SUMMARY |
| 38 from gslib.help_provider import HELP_TEXT |
| 39 from gslib.help_provider import HelpType |
| 40 from gslib.help_provider import HELP_TYPE |
| 41 from gslib.util import NO_MAX |
| 42 from tests.integration.s3.mock_storage_service import MockBucketStorageUri |
| 43 |
| 44 _detailed_help_text = (""" |
| 45 <B>SYNOPSIS</B> |
| 46 gsutil test [command command...] |
| 47 |
| 48 |
| 49 <B>DESCRIPTION</B> |
| 50 The gsutil test command runs end-to-end tests of gsutil commands (i.e., |
| 51 tests that send requests to the production service. This stands in contrast |
| 52 to tests that use an in-memory mock storage service implementation (see |
| 53 "gsutil help dev" for more details on the latter). |
| 54 |
| 55 To run all end-to-end tests run the command with no arguments: |
| 56 |
| 57 gsutil test |
| 58 |
| 59 To see additional details for test failures: |
| 60 |
| 61 gsutil -d test |
| 62 |
| 63 To run tests for one or more individual commands add those commands as |
| 64 arguments. For example: |
| 65 |
| 66 gsutil test cp mv |
| 67 |
| 68 will run the cp and mv command tests. |
| 69 |
| 70 Note: the end-to-end tests are defined in the code for each command (e.g., |
| 71 cp end-to-end tests are in gslib/commands/cp.py). See the comments around |
| 72 'test_steps' in each of the Command subclasses. |
| 73 """) |
| 74 |
| 75 |
| 76 class TestCommand(Command): |
| 77 """Implementation of gsutil test command.""" |
| 78 |
| 79 # Command specification (processed by parent class). |
| 80 command_spec = { |
| 81 # Name of command. |
| 82 COMMAND_NAME : 'test', |
| 83 # List of command name aliases. |
| 84 COMMAND_NAME_ALIASES : [], |
| 85 # Min number of args required by this command. |
| 86 MIN_ARGS : 0, |
| 87 # Max number of args required by this command, or NO_MAX. |
| 88 MAX_ARGS : NO_MAX, |
| 89 # Getopt-style string specifying acceptable sub args. |
| 90 SUPPORTED_SUB_ARGS : '', |
| 91 # True if file URIs acceptable for this command. |
| 92 FILE_URIS_OK : True, |
| 93 # True if provider-only URIs acceptable for this command. |
| 94 PROVIDER_URIS_OK : False, |
| 95 # Index in args of first URI arg. |
| 96 URIS_START_ARG : 0, |
| 97 # True if must configure gsutil before running command. |
| 98 CONFIG_REQUIRED : True, |
| 99 } |
| 100 help_spec = { |
| 101 # Name of command or auxiliary help info for which this help applies. |
| 102 HELP_NAME : 'test', |
| 103 # List of help name aliases. |
| 104 HELP_NAME_ALIASES : [], |
| 105 # Type of help: |
| 106 HELP_TYPE : HelpType.COMMAND_HELP, |
| 107 # One line summary of this help. |
| 108 HELP_ONE_LINE_SUMMARY : 'Run end to end gsutil tests', |
| 109 # The full help text. |
| 110 HELP_TEXT : _detailed_help_text, |
| 111 } |
| 112 |
| 113 # Define constants & class attributes for command testing. |
| 114 username = getpass.getuser().lower() |
| 115 _test_prefix_file = 'gsutil_test_file_' + username + '_' |
| 116 _test_prefix_bucket = 'gsutil_test_bucket_' + username + '_' |
| 117 _test_prefix_object = 'gsutil_test_object_' + username + '_' |
| 118 # Replacement regexps for format specs in test_steps values. |
| 119 _test_replacements = {} |
| 120 # Cache for whether system shell is pipefail capable |
| 121 _pipefail_capable = None |
| 122 |
| 123 def _TestRunner(self, cmd, debug): |
| 124 """Run a test command in a subprocess and return result. If debugging |
| 125 requested, display the command, otherwise redirect stdout & stderr |
| 126 to /dev/null. |
| 127 """ |
| 128 if not debug and '>' not in cmd: |
| 129 cmd += ' >/dev/null 2>&1' |
| 130 # Set pipefail when bash is available. Otherwise, errors might be ignored |
| 131 # silently in the case of commands like `gsutil cp foo bar | wc -l`. Without |
| 132 # pipefail being set, gsutil could crash but a test might still pass. |
| 133 if self.pipefail_capable(): |
| 134 cmd = 'set -o pipefail; ' + cmd |
| 135 if debug: |
| 136 print 'cmd:', cmd |
| 137 return subprocess.call(cmd, shell=True) |
| 138 |
| 139 def global_setup(self, debug, num_buckets=10): |
| 140 """General test setup. |
| 141 |
| 142 For general testing use create up to three buckets, one empty, one |
| 143 containing one object and one containing two objects. Also create |
| 144 three files for general use. Also initialize _test_replacements. |
| 145 """ |
| 146 print 'Global setup started...' |
| 147 |
| 148 self._test_replacements = { |
| 149 r'\$B(\d)' : self._test_prefix_bucket + r'\1', |
| 150 r'\$O(\d)' : self._test_prefix_object + r'\1', |
| 151 r'\$F(\d)' : self._test_prefix_file + r'\1', |
| 152 r'\$G' : self.gsutil_bin_dir, |
| 153 } |
| 154 |
| 155 # Build lists of buckets and files. |
| 156 bucket_list = ['gs://$B%d' % i for i in range(0, num_buckets)] |
| 157 file_list = ['$F%d' % i for i in range(0, 3)] |
| 158 |
| 159 # Create test buckets. |
| 160 bucket_cmd = self.gsutil_cmd + ' mb ' + ' '.join(bucket_list) |
| 161 bucket_cmd = self.sub_format_specs(bucket_cmd) |
| 162 self._TestRunner(bucket_cmd, debug) |
| 163 |
| 164 # Create test objects - zero in first bucket, one in second, two in third. |
| 165 for i in range(0, min(3, num_buckets)): |
| 166 for j in range(0, i): |
| 167 object_cmd = ('echo test | ' + self.gsutil_cmd + |
| 168 ' cp - gs://$B%d/$O%d' % (i, j)) |
| 169 object_cmd = self.sub_format_specs(object_cmd) |
| 170 self._TestRunner(object_cmd, debug) |
| 171 |
| 172 # Create three test files of size 10MB each. |
| 173 for file in file_list: |
| 174 file = self.sub_format_specs(file) |
| 175 f = open(file, 'w') |
| 176 f.write(os.urandom(10**6)) |
| 177 f.close() |
| 178 |
| 179 print 'Global setup completed.' |
| 180 |
| 181 def global_teardown(self, debug, num_buckets=10): |
| 182 """General test cleanup. |
| 183 |
| 184 Remove all buckets, objects and files used by this test facility. |
| 185 """ |
| 186 print 'Global teardown started...' |
| 187 # Build commands to remove objects, buckets and files. |
| 188 bucket_list = ['gs://$B%d' % i for i in range(0, num_buckets)] |
| 189 object_list = ['gs://$B%d/*' % i for i in range(0, num_buckets)] |
| 190 file_list = ['$F%d' % i for i in range(0, num_buckets)] |
| 191 bucket_cmd = self.gsutil_cmd + ' rb ' + ' '.join(bucket_list) |
| 192 object_cmd = self.gsutil_cmd + ' rm -af ' + ' '.join(object_list) |
| 193 for f in file_list: |
| 194 f = self.sub_format_specs(f) |
| 195 if os.path.exists(f): |
| 196 os.unlink(f) |
| 197 |
| 198 # Substitute format specifiers ($Bn, $On, $Fn, $G). |
| 199 bucket_cmd = self.sub_format_specs(bucket_cmd) |
| 200 if not debug: |
| 201 bucket_cmd += ' >/dev/null 2>&1' |
| 202 object_cmd = self.sub_format_specs(object_cmd) |
| 203 if not debug: |
| 204 object_cmd += ' >/dev/null 2>&1' |
| 205 |
| 206 # Run the commands. |
| 207 self._TestRunner(object_cmd, debug) |
| 208 self._TestRunner(bucket_cmd, debug) |
| 209 |
| 210 print 'Global teardown completed.' |
| 211 |
| 212 # Command entry point. |
| 213 def RunCommand(self): |
| 214 |
| 215 # To avoid testing aliases, we keep track of previous tests. |
| 216 already_tested = {} |
| 217 |
| 218 self.gsutil_cmd = '' |
| 219 # If running on Windows, invoke python interpreter explicitly. |
| 220 if platform.system() == "Windows": |
| 221 self.gsutil_cmd += 'python ' |
| 222 # Add full path to gsutil to make sure we test the correct version. |
| 223 self.gsutil_cmd += os.path.join(self.gsutil_bin_dir, 'gsutil') |
| 224 |
| 225 # Set sim option on exec'ed commands if user requested mock provider. |
| 226 if issubclass(self.bucket_storage_uri_class, MockBucketStorageUri): |
| 227 self.gsutil_cmd += ' -s' |
| 228 |
| 229 # Instantiate test generator for creating test functions on the fly. |
| 230 gen = test_generator() |
| 231 |
| 232 # Set list of commands to test to include user supplied commands or all |
| 233 # commands if none specified by user ('gsutil test' implies test all). |
| 234 commands_to_test = [] |
| 235 if self.args: |
| 236 for name in self.args: |
| 237 if name in self.command_runner.command_map: |
| 238 commands_to_test.append(name) |
| 239 else: |
| 240 raise CommandException('Test requested for unknown command %s.' |
| 241 % name) |
| 242 else: |
| 243 # No commands specified so test all commands. |
| 244 commands_to_test = self.command_runner.command_map.keys() |
| 245 |
| 246 t0 = time.time() |
| 247 test_results = [] |
| 248 |
| 249 for name in commands_to_test: |
| 250 cmd = self.command_runner.command_map[name] |
| 251 |
| 252 # Skip this command if test steps not defined or empty. |
| 253 if not hasattr(cmd, 'test_steps') or not cmd.test_steps: |
| 254 if self.debug: |
| 255 print 'Skipping %s command because no test steps defined.' % name |
| 256 continue |
| 257 |
| 258 # Skip aliases for commands we've already tested. |
| 259 if cmd in already_tested: |
| 260 continue |
| 261 already_tested[cmd] = 1 |
| 262 |
| 263 # Figure out how many buckets we'll need. |
| 264 num_test_buckets = 3 |
| 265 if hasattr(cmd, 'num_test_buckets'): |
| 266 num_test_buckets = cmd.num_test_buckets |
| 267 |
| 268 # Run global test setup. |
| 269 self.global_setup(self.debug, num_test_buckets) |
| 270 |
| 271 # If command has a test_setup method, run per command setup here. |
| 272 if hasattr(cmd, 'test_setup'): |
| 273 cmd.test_setup(self.debug) |
| 274 |
| 275 # Instantiate a test suite, which we'll dynamically add tests to. |
| 276 suite = unittest.TestSuite() |
| 277 |
| 278 # Iterate over the entries in this command's test specification. |
| 279 for (cmdname, cmdline, expect_ret, diff) in cmd.test_steps: |
| 280 cmdline = cmdline.replace('gsutil ', self.gsutil_cmd + ' ') |
| 281 if platform.system() == 'Windows': |
| 282 cmdline = cmdline.replace('cat ', 'type ') |
| 283 |
| 284 # Store file names requested for diff. |
| 285 result_file = None |
| 286 expect_file = None |
| 287 if diff: |
| 288 (result_file, expect_file) = diff |
| 289 |
| 290 # Substitute format specifiers ($Bn, $On, $Fn). |
| 291 cmdline = self.sub_format_specs(cmdline) |
| 292 result_file = self.sub_format_specs(result_file) |
| 293 expect_file = self.sub_format_specs(expect_file) |
| 294 |
| 295 # Generate test function, wrap in a test case and add to test suite. |
| 296 func = gen.genTest(self._TestRunner, cmdline, expect_ret, |
| 297 result_file, expect_file, self.debug) |
| 298 test_case = unittest.FunctionTestCase(func, description=cmdname) |
| 299 suite.addTest(test_case) |
| 300 |
| 301 # Run the tests we've just accumulated. |
| 302 print 'Running %s tests for %s command.' % (suite.countTestCases(), name) |
| 303 test_result = unittest.TextTestRunner(verbosity=2).run(suite) |
| 304 test_results.append(test_result) |
| 305 |
| 306 # If command has a test_teardown method, run per command teardown here. |
| 307 if hasattr(cmd, 'test_teardown'): |
| 308 cmd.test_teardown(self.debug) |
| 309 |
| 310 # Run global test teardown. |
| 311 self.global_teardown(self.debug, num_test_buckets) |
| 312 |
| 313 t1 = time.time() |
| 314 num_failures = sum(len(result.failures) for result in test_results) |
| 315 num_errors = sum(len(result.errors) for result in test_results) |
| 316 num_run = sum(result.testsRun for result in test_results) |
| 317 passed = all(result.wasSuccessful() for result in test_results) |
| 318 |
| 319 print |
| 320 print '-' * 80 |
| 321 print ('Ran %d tests from %d test suites in %.7g seconds' % |
| 322 (num_run, len(test_results), t1-t0)) |
| 323 print '%d failures, %d errors' % (num_failures, num_errors) |
| 324 print |
| 325 if passed: |
| 326 print 'ALL TESTS PASS' |
| 327 return 0 |
| 328 else: |
| 329 print 'FAIL' |
| 330 return 1 |
| 331 |
| 332 def pipefail_capable(self): |
| 333 """Check whether the system's shell is capable of setting the pipefail |
| 334 attribute. Returns True if capable, False otherwise. Only runs the actual |
| 335 shell once so subsequent calls are cached. |
| 336 """ |
| 337 if TestCommand._pipefail_capable is None: |
| 338 if ('win32' not in str(sys.platform).lower() and |
| 339 subprocess.call('set -o pipefail', shell=True) == 0): |
| 340 TestCommand._pipefail_capable = True |
| 341 else: |
| 342 TestCommand._pipefail_capable = False |
| 343 return TestCommand._pipefail_capable |
| 344 |
| 345 def sub_format_specs(self, s): |
| 346 """Perform iterative regexp substitutions on passed string. |
| 347 |
| 348 This method iteratively substitutes values in a passed string, |
| 349 returning the modified string when done. |
| 350 """ |
| 351 # Don't bother if the passed string is empty or None. |
| 352 if s: |
| 353 for (template, repl_str) in self._test_replacements.items(): |
| 354 while re.search(template, s): |
| 355 # Keep substituting as long as the template is found. |
| 356 s = re.sub(template, repl_str, s) |
| 357 return s |
| 358 |
| 359 |
class test_generator(unittest.TestCase):
  """Dynamic test generator for use with unittest module.

  This class is used to generate a test case function. It
  inherits from unittest.TestCase so that it has access to
  all the TestCase componentry (e.g. self.assertEqual, etc.).
  """

  def runTest(self):
    """Required method to instantiate unittest.TestCase derived class.

    (TestCase's default methodName is 'runTest'; it must exist - and must
    accept self - so that test_generator() can be instantiated and the
    method invoked without error.)
    """
    pass

  def genTest(self, runner, cmd, expect_ret, result_file, expect_file, debug):
    """Create and return a function to execute unittest module test cases.

    This method generates a test function based on the passed
    input and some inherited methods and returns the generated
    function to the caller.

    Args:
      runner: callable taking (cmd, debug) and returning an exit status.
      cmd: shell command line to execute.
      expect_ret: expected exit status, or None to skip the check.
      result_file: file produced by cmd, diffed against expect_file
          (both must be non-empty for the diff to run).
      expect_file: file holding the expected output.
      debug: passed through to runner.
    """

    def test_func():
      # Run the test command and capture the result in ret.
      ret = runner(cmd, debug)
      if expect_ret is not None:
        # If an expected return code was passed, make sure we got it.
        self.assertEqual(ret, expect_ret)
      if result_file and expect_file:
        # If cmd generated output, diff it against expected output.
        # (Windows has no diff; comp prompts for more files, hence echo n.)
        if platform.system() == 'Windows':
          diff_cmd = 'echo n | comp '
        else:
          diff_cmd = 'diff '
        diff_cmd += '%s %s' % (result_file, expect_file)
        diff_ret = runner(diff_cmd, debug)
        self.assertEqual(diff_ret, 0)
    # Return the generated function to the caller.
    return test_func
OLD | NEW |