Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(84)

Side by Side Diff: gslib/commands/test.py

Issue 698893003: Update checked in version of gsutil to version 4.6 (Closed) Base URL: http://dart.googlecode.com/svn/third_party/gsutil/
Patch Set: Created 6 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « gslib/commands/stat.py ('k') | gslib/commands/update.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 # -*- coding: utf-8 -*-
1 # Copyright 2011 Google Inc. All Rights Reserved. 2 # Copyright 2011 Google Inc. All Rights Reserved.
2 # 3 #
3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License. 5 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at 6 # You may obtain a copy of the License at
6 # 7 #
7 # http://www.apache.org/licenses/LICENSE-2.0 8 # http://www.apache.org/licenses/LICENSE-2.0
8 # 9 #
9 # Unless required by applicable law or agreed to in writing, software 10 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and 13 # See the License for the specific language governing permissions and
13 # limitations under the License. 14 # limitations under the License.
15 """Implementation of gsutil test command."""
14 16
15 # Get the system logging module, not our local logging module.
16 from __future__ import absolute_import 17 from __future__ import absolute_import
17 18
18 import logging 19 import logging
19 import os.path 20 import os
20 import re 21 import subprocess
22 import sys
23 import tempfile
24 import textwrap
25 import time
21 26
27 import gslib
22 from gslib.command import Command 28 from gslib.command import Command
23 from gslib.command import COMMAND_NAME 29 from gslib.command import ResetFailureCount
24 from gslib.command import COMMAND_NAME_ALIASES
25 from gslib.command import FILE_URIS_OK
26 from gslib.command import MAX_ARGS
27 from gslib.command import MIN_ARGS
28 from gslib.command import PROVIDER_URIS_OK
29 from gslib.command import SUPPORTED_SUB_ARGS
30 from gslib.command import URIS_START_ARG
31 from gslib.exception import CommandException 30 from gslib.exception import CommandException
32 from gslib.help_provider import HELP_NAME
33 from gslib.help_provider import HELP_NAME_ALIASES
34 from gslib.help_provider import HELP_ONE_LINE_SUMMARY
35 from gslib.help_provider import HELP_TEXT
36 from gslib.help_provider import HELP_TYPE
37 from gslib.help_provider import HelpType
38 import gslib.tests as tests 31 import gslib.tests as tests
32 from gslib.util import IS_WINDOWS
39 from gslib.util import NO_MAX 33 from gslib.util import NO_MAX
40 34
41 35
42 # For Python 2.6, unittest2 is required to run the tests. If it's not available, 36 # For Python 2.6, unittest2 is required to run the tests. If it's not available,
43 # display an error if the test command is run instead of breaking the whole 37 # display an error if the test command is run instead of breaking the whole
44 # program. 38 # program.
39 # pylint: disable=g-import-not-at-top
45 try: 40 try:
46 from gslib.tests.util import GetTestNames 41 from gslib.tests.util import GetTestNames
47 from gslib.tests.util import unittest 42 from gslib.tests.util import unittest
48 except ImportError as e: 43 except ImportError as e:
49 if 'unittest2' in str(e): 44 if 'unittest2' in str(e):
50 unittest = None 45 unittest = None
51 GetTestNames = None 46 GetTestNames = None # pylint: disable=invalid-name
52 else: 47 else:
53 raise 48 raise
54 49
55 50
56 _detailed_help_text = (""" 51 try:
52 import coverage
53 except ImportError:
54 coverage = None
55
56
57 DEFAULT_TEST_PARALLEL_PROCESSES = 15
58 DEFAULT_S3_TEST_PARALLEL_PROCESSES = 50
59
60
61 _DETAILED_HELP_TEXT = ("""
57 <B>SYNOPSIS</B> 62 <B>SYNOPSIS</B>
58 gsutil test [-l] [-u] [-f] [command command...] 63 gsutil test [-l] [-u] [-f] [command command...]
59 64
60 65
61 <B>DESCRIPTION</B> 66 <B>DESCRIPTION</B>
62 The gsutil test command runs the gsutil unit tests and integration tests. 67 The gsutil test command runs the gsutil unit tests and integration tests.
63 The unit tests use an in-memory mock storage service implementation, while 68 The unit tests use an in-memory mock storage service implementation, while
64 the integration tests send requests to the production service. 69 the integration tests send requests to the production service using the
70 preferred API set in the boto configuration file (see "gsutil help apis" for
71 details).
65 72
66 To run both the unit tests and integration tests, run the command with no 73 To run both the unit tests and integration tests, run the command with no
67 arguments: 74 arguments:
68 75
69 gsutil test 76 gsutil test
70 77
71 To run the unit tests only (which run quickly): 78 To run the unit tests only (which run quickly):
72 79
73 gsutil test -u 80 gsutil test -u
74 81
82 To run integration tests in parallel (CPU-intensive but much faster):
83
84 gsutil -m test
85
86 To limit the number of tests run in parallel to 10 at a time:
87
88 gsutil -m test -p 10
89
75 To see additional details for test failures: 90 To see additional details for test failures:
76 91
77 gsutil -d test 92 gsutil -d test
78 93
79 To have the tests stop running immediately when an error occurs: 94 To have the tests stop running immediately when an error occurs:
80 95
81 gsutil test -f 96 gsutil test -f
82 97
83 To run tests for one or more individual commands add those commands as 98 To run tests for one or more individual commands add those commands as
84 arguments. For example, the following command will run the cp and mv command 99 arguments. For example, the following command will run the cp and mv command
85 tests: 100 tests:
86 101
87 gsutil test cp mv 102 gsutil test cp mv
88 103
89 To list available tests, run the test command with the -l argument: 104 To list available tests, run the test command with the -l argument:
90 105
91 gsutil test -l 106 gsutil test -l
92 107
93 The tests are defined in the code under the gslib/tests module. Each test 108 The tests are defined in the code under the gslib/tests module. Each test
94 file is of the format test_[name].py where [name] is the test name you can 109 file is of the format test_[name].py where [name] is the test name you can
95 pass to this command. For example, running "gsutil test ls" would run the 110 pass to this command. For example, running "gsutil test ls" would run the
96 tests in "gslib/tests/test_ls.py". 111 tests in "gslib/tests/test_ls.py".
97 112
98 You can also run an individual test class or function name by passing the 113 You can also run an individual test class or function name by passing the
99 the test module followed by the class name and optionally a test name. For 114 test module followed by the class name and optionally a test name. For
100 example, to run an entire test class by name: 115 example, to run an entire test class by name:
101 116
102 gsutil test naming.GsutilNamingTests 117 gsutil test naming.GsutilNamingTests
103 118
104 or an individual test function: 119 or an individual test function:
105 120
106 gsutil test cp.TestCp.test_streaming 121 gsutil test cp.TestCp.test_streaming
107 122
108 You can list the available tests under a module or class by passing arguments 123 You can list the available tests under a module or class by passing arguments
109 with the -l option. For example, to list all available test functions in the 124 with the -l option. For example, to list all available test functions in the
110 cp module: 125 cp module:
111 126
112 gsutil test -l cp 127 gsutil test -l cp
113 128
129 To output test coverage:
130
131 gsutil -m test -c -p 500
132 coverage html
133
134 This will output an HTML report to a directory named 'htmlcov'.
135
114 136
115 <B>OPTIONS</B> 137 <B>OPTIONS</B>
138 -c Output coverage information.
139
140 -f Exit on first test failure.
141
116 -l List available tests. 142 -l List available tests.
117 143
144 -p N Run at most N tests in parallel. The default value is %d.
145
146 -s Run tests against S3 instead of GS.
147
118 -u Only run unit tests. 148 -u Only run unit tests.
119 149 """ % DEFAULT_TEST_PARALLEL_PROCESSES)
120 -f Exit on first test failure.
121 """)
122 150
123 151
def MakeCustomTestResultClass(total_tests):
  """Creates a closure of CustomTestResult.

  Args:
    total_tests: The total number of tests being run.

  Returns:
    The CustomTestResult class itself (not an instance); it closes over
    total_tests so the progress line can report completed/total counts.
  """

  class CustomTestResult(unittest.TextTestResult):
    """A subclass of unittest.TextTestResult that prints a progress report."""

    def startTest(self, test):
      super(CustomTestResult, self).startTest(test)
      if self.dots:
        # Show only the trailing 'Class.method' portion of the full test id.
        test_id = '.'.join(test.id().split('.')[-2:])
        message = ('\r%d/%d finished - E[%d] F[%d] s[%d] - %s' % (
            self.testsRun, total_tests, len(self.errors),
            len(self.failures), len(self.skipped), test_id))
        # Truncate then pad to a fixed width so each \r rewrite fully
        # overwrites the previous progress line.
        message = message[:73]
        message = message.ljust(73)
        self.stream.write('%s - ' % message)

  return CustomTestResult
149 177
150 178
def GetTestNamesFromSuites(test_suite):
  """Takes a list of test suites and returns a list of contained test names."""
  prefix_len = len('gslib.tests.test_')
  test_names = []
  pending = [test_suite]
  # Depth-first flattening: nested suites go back on the stack, leaf
  # test cases contribute their id (minus the module prefix).
  while pending:
    current = pending.pop()
    for member in current:
      if isinstance(member, unittest.TestSuite):
        pending.append(member)
      else:
        test_names.append(member.id()[prefix_len:])
  return test_names
191
192
# pylint: disable=protected-access
# Need to get into the guts of unittest to evaluate test cases for parallelism.
def TestCaseToName(test_case):
  """Converts a python.unittest to its gsutil test-callable name."""
  # str(cls) looks like "<class 'module.Class'>"; the piece between the
  # single quotes is the fully qualified class path.
  qualified_class = str(test_case.__class__).split('\'')[1]
  return qualified_class + '.' + test_case._testMethodName
199
200
# pylint: disable=protected-access
# Need to get into the guts of unittest to evaluate test cases for parallelism.
def SplitParallelizableTestSuite(test_suite):
  """Splits a test suite into groups with different running properties.

  Args:
    test_suite: A python unittest test suite.

  Returns:
    3-part tuple of lists of test names:
    (tests that must be run sequentially,
     integration tests that can run in parallel,
     unit tests that can be run in parallel)
  """
  # pylint: disable=import-not-at-top
  # Need to import this after test globals are set so that skip functions work.
  from gslib.tests.testcase.unit_testcase import GsUtilUnitTestCase
  sequential = []
  parallel_integration = []
  parallel_unit = []

  # First pass: flatten arbitrarily nested suites into a flat list of cases.
  pending = [test_suite]
  flat_cases = []
  while pending:
    node = pending.pop()
    if isinstance(node, unittest.suite.TestSuite):
      for child in node._tests:
        pending.append(child)
    elif isinstance(node, unittest.TestCase):
      flat_cases.append(node)

  # Second pass: bucket each case by its parallelism properties.
  for case in flat_cases:
    bound_method = getattr(case, case._testMethodName, None)
    if not getattr(bound_method, 'is_parallelizable', True):
      sequential.append(TestCaseToName(case))
    elif isinstance(case, GsUtilUnitTestCase):
      parallel_unit.append(TestCaseToName(case))
    else:
      parallel_integration.append(TestCaseToName(case))

  return (sorted(sequential),
          sorted(parallel_integration),
          sorted(parallel_unit))
245
246
def CountFalseInList(input_list):
  """Counts number of falses in the input list.

  Args:
    input_list: Iterable of items to evaluate for truthiness.

  Returns:
    Number of falsy items (e.g. False, 0, '', None) in the list.
  """
  # sum() over a generator replaces the manual counter loop.
  return sum(1 for item in input_list if not item)
254
255
def CreateTestProcesses(parallel_tests, test_index, process_list, process_done,
                        max_parallel_tests, root_coverage_file=None):
  """Creates test processes to run tests in parallel.

  Launches one 'gsutil test <name>' subprocess per parallel test until
  max_parallel_tests subprocesses are running concurrently (or all tests
  have been started), appending bookkeeping entries to the caller's lists.

  Args:
    parallel_tests: List of all parallel tests.
    test_index: List index of last created test before this function call.
    process_list: List of running subprocesses. Created processes are appended
                  to this list.
    process_done: List of booleans indicating process completion. One 'False'
                  will be added per process created.
    max_parallel_tests: Maximum number of tests to run in parallel.
    root_coverage_file: The root .coverage filename if coverage is requested.

  Returns:
    Index of last created test.
  """
  orig_test_index = test_index
  # On Windows the gsutil script is not directly executable, so run it
  # through the Python interpreter explicitly.
  executable_prefix = [sys.executable] if sys.executable and IS_WINDOWS else []
  # Propagate S3 mode to child test processes via the -s flag.
  s3_argument = ['-s'] if tests.util.RUN_S3_TESTS else []

  process_create_start_time = time.time()
  last_log_time = process_create_start_time
  # CountFalseInList(process_done) is the number of still-running processes;
  # keep spawning until the parallelism cap is hit or tests run out.
  while (CountFalseInList(process_done) < max_parallel_tests and
         test_index < len(parallel_tests)):
    env = os.environ.copy()
    if root_coverage_file:
      # Child processes write their coverage data keyed off this root file.
      env['GSUTIL_COVERAGE_OUTPUT_FILE'] = root_coverage_file
    process_list.append(subprocess.Popen(
        # Strip the 'gslib.tests.test_' prefix to get the name form that
        # the test command accepts as an argument.
        executable_prefix + [gslib.GSUTIL_PATH] + ['test'] + s3_argument +
        [parallel_tests[test_index][len('gslib.tests.test_'):]],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env))
    test_index += 1
    process_done.append(False)
    # Throttle progress output to at most once every 5 seconds.
    if time.time() - last_log_time > 5:
      print ('Created %d new processes (total %d/%d created)' %
             (test_index - orig_test_index, len(process_list),
              len(parallel_tests)))
      last_log_time = time.time()
  if test_index == len(parallel_tests):
    print ('Test process creation finished (%d/%d created)' %
           (len(process_list), len(parallel_tests)))
  return test_index
299
300
151 class TestCommand(Command): 301 class TestCommand(Command):
152 """Implementation of gsutil test command.""" 302 """Implementation of gsutil test command."""
153 303
154 # Command specification (processed by parent class). 304 # Command specification. See base class for documentation.
155 command_spec = { 305 command_spec = Command.CreateCommandSpec(
156 # Name of command. 306 'test',
157 COMMAND_NAME: 'test', 307 command_name_aliases=[],
158 # List of command name aliases. 308 min_args=0,
159 COMMAND_NAME_ALIASES: [], 309 max_args=NO_MAX,
160 # Min number of args required by this command. 310 supported_sub_args='uflp:sc',
161 MIN_ARGS: 0, 311 file_url_ok=True,
162 # Max number of args required by this command, or NO_MAX. 312 provider_url_ok=False,
163 MAX_ARGS: NO_MAX, 313 urls_start_arg=0,
164 # Getopt-style string specifying acceptable sub args. 314 )
165 SUPPORTED_SUB_ARGS: 'ufl', 315 # Help specification. See help_provider.py for documentation.
166 # True if file URIs acceptable for this command. 316 help_spec = Command.HelpSpec(
167 FILE_URIS_OK: True, 317 help_name='test',
168 # True if provider-only URIs acceptable for this command. 318 help_name_aliases=[],
169 PROVIDER_URIS_OK: False, 319 help_type='command_help',
170 # Index in args of first URI arg. 320 help_one_line_summary='Run gsutil tests',
171 URIS_START_ARG: 0, 321 help_text=_DETAILED_HELP_TEXT,
172 } 322 subcommand_help_text={},
173 help_spec = { 323 )
174 # Name of command or auxiliary help info for which this help applies.
175 HELP_NAME: 'test',
176 # List of help name aliases.
177 HELP_NAME_ALIASES: [],
178 # Type of help:
179 HELP_TYPE: HelpType.COMMAND_HELP,
180 # One line summary of this help.
181 HELP_ONE_LINE_SUMMARY: 'Run gsutil tests',
182 # The full help text.
183 HELP_TEXT: _detailed_help_text,
184 }
185 324
186 # Command entry point.
187 def RunCommand(self): 325 def RunCommand(self):
326 """Command entry point for the test command."""
188 if not unittest: 327 if not unittest:
189 raise CommandException('On Python 2.6, the unittest2 module is required ' 328 raise CommandException('On Python 2.6, the unittest2 module is required '
190 'to run the gsutil tests.') 329 'to run the gsutil tests.')
191 330
192 failfast = False 331 failfast = False
193 list_tests = False 332 list_tests = False
333 max_parallel_tests = DEFAULT_TEST_PARALLEL_PROCESSES
334 perform_coverage = False
194 if self.sub_opts: 335 if self.sub_opts:
195 for o, _ in self.sub_opts: 336 for o, a in self.sub_opts:
196 if o == '-u': 337 if o == '-c':
197 tests.util.RUN_INTEGRATION_TESTS = False 338 perform_coverage = True
198 elif o == '-f': 339 elif o == '-f':
199 failfast = True 340 failfast = True
200 elif o == '-l': 341 elif o == '-l':
201 list_tests = True 342 list_tests = True
343 elif o == '-p':
344 max_parallel_tests = long(a)
345 elif o == '-s':
346 if not tests.util.HAS_S3_CREDS:
347 raise CommandException('S3 tests require S3 credentials. Please '
348 'add appropriate credentials to your .boto '
349 'file and re-run.')
350 tests.util.RUN_S3_TESTS = True
351 elif o == '-u':
352 tests.util.RUN_INTEGRATION_TESTS = False
353
354 if perform_coverage and not coverage:
355 raise CommandException(
356 'Coverage has been requested but the coverage module was not found. '
357 'You can install it with "pip install coverage".')
358
359 if self.parallel_operations:
360 if IS_WINDOWS:
361 raise CommandException('-m test is not supported on Windows.')
362 elif (tests.util.RUN_S3_TESTS and
363 max_parallel_tests > DEFAULT_S3_TEST_PARALLEL_PROCESSES):
364 self.logger.warn(
365 'Reducing parallel tests to %d due to S3 maximum bucket '
366 'limitations.', DEFAULT_S3_TEST_PARALLEL_PROCESSES)
367 max_parallel_tests = DEFAULT_S3_TEST_PARALLEL_PROCESSES
202 368
203 test_names = sorted(GetTestNames()) 369 test_names = sorted(GetTestNames())
204 if list_tests and not self.args: 370 if list_tests and not self.args:
205 print 'Found %d test names:' % len(test_names) 371 print 'Found %d test names:' % len(test_names)
206 print ' ', '\n '.join(sorted(test_names)) 372 print ' ', '\n '.join(sorted(test_names))
207 return 0 373 return 0
208 374
209 # Set list of commands to test if supplied. 375 # Set list of commands to test if supplied.
210 if self.args: 376 if self.args:
211 commands_to_test = [] 377 commands_to_test = []
(...skipping 10 matching lines...) Expand all
222 388
223 loader = unittest.TestLoader() 389 loader = unittest.TestLoader()
224 390
225 if commands_to_test: 391 if commands_to_test:
226 try: 392 try:
227 suite = loader.loadTestsFromNames(commands_to_test) 393 suite = loader.loadTestsFromNames(commands_to_test)
228 except (ImportError, AttributeError) as e: 394 except (ImportError, AttributeError) as e:
229 raise CommandException('Invalid test argument name: %s' % e) 395 raise CommandException('Invalid test argument name: %s' % e)
230 396
231 if list_tests: 397 if list_tests:
232 suites = [suite] 398 test_names = GetTestNamesFromSuites(suite)
233 test_names = []
234 while suites:
235 suite = suites.pop()
236 for test in suite:
237 if isinstance(test, unittest.TestSuite):
238 suites.append(test)
239 else:
240 test_names.append(test.id().lstrip('gslib.tests.test_'))
241 print 'Found %d test names:' % len(test_names) 399 print 'Found %d test names:' % len(test_names)
242 print ' ', '\n '.join(sorted(test_names)) 400 print ' ', '\n '.join(sorted(test_names))
243 return 0 401 return 0
244 402
245 if logging.getLogger().getEffectiveLevel() <= logging.INFO: 403 if logging.getLogger().getEffectiveLevel() <= logging.INFO:
246 verbosity = 1 404 verbosity = 1
247 else: 405 else:
248 verbosity = 2 406 verbosity = 2
249 logging.disable(logging.ERROR) 407 logging.disable(logging.ERROR)
250 408
251 total_tests = suite.countTestCases() 409 if perform_coverage:
252 resultclass = MakeCustomTestResultClass(total_tests) 410 # We want to run coverage over the gslib module, but filter out the test
411 # modules and any third-party code. We also filter out anything under the
412 # temporary directory. Otherwise, the gsutil update test (which copies
413 # code to the temporary directory) gets included in the output.
414 coverage_controller = coverage.coverage(
415 source=['gslib'], omit=['gslib/third_party/*', 'gslib/tests/*',
416 tempfile.gettempdir() + '*'])
417 coverage_controller.erase()
418 coverage_controller.start()
253 419
254 runner = unittest.TextTestRunner(verbosity=verbosity, 420 num_parallel_failures = 0
255 resultclass=resultclass, failfast=failfast) 421 if self.parallel_operations:
256 ret = runner.run(suite) 422 sequential_tests, parallel_integration_tests, parallel_unit_tests = (
257 if ret.wasSuccessful(): 423 SplitParallelizableTestSuite(suite))
424
425 sequential_start_time = time.time()
426 # TODO: For now, run unit tests sequentially because they are fast.
427 # We could potentially shave off several seconds of execution time
428 # by executing them in parallel with the integration tests.
429 # Note that parallelism_framework unit tests cannot be run in a
430 # subprocess.
431 print 'Running %d tests sequentially.' % (len(sequential_tests) +
432 len(parallel_unit_tests))
433 sequential_tests_to_run = sequential_tests + parallel_unit_tests
434 suite = loader.loadTestsFromNames(
435 sorted([test_name for test_name in sequential_tests_to_run]))
436 num_sequential_tests = suite.countTestCases()
437 resultclass = MakeCustomTestResultClass(num_sequential_tests)
438 runner = unittest.TextTestRunner(verbosity=verbosity,
439 resultclass=resultclass,
440 failfast=failfast)
441 ret = runner.run(suite)
442
443 num_parallel_tests = len(parallel_integration_tests)
444 max_processes = min(max_parallel_tests, num_parallel_tests)
445
446 print ('\n'.join(textwrap.wrap(
447 'Running %d integration tests in parallel mode (%d processes)! '
448 'Please be patient while your CPU is incinerated. '
449 'If your machine becomes unresponsive, consider reducing '
450 'the amount of parallel test processes by running '
451 '\'gsutil -m test -p <num_processes>\'.' %
452 (num_parallel_tests, max_processes))))
453 process_list = []
454 process_done = []
455 process_results = [] # Tuples of (name, return code, stdout, stderr)
456 hang_detection_counter = 0
457 completed_as_of_last_log = 0
458 parallel_start_time = last_log_time = time.time()
459 test_index = CreateTestProcesses(
460 parallel_integration_tests, 0, process_list, process_done,
461 max_parallel_tests,
462 coverage_controller.data.filename if perform_coverage else None)
463 while len(process_results) < num_parallel_tests:
464 for proc_num in xrange(len(process_list)):
465 if process_done[proc_num] or process_list[proc_num].poll() is None:
466 continue
467 process_done[proc_num] = True
468 stdout, stderr = process_list[proc_num].communicate()
469 # TODO: Differentiate test failures from errors.
470 if process_list[proc_num].returncode != 0:
471 num_parallel_failures += 1
472 process_results.append((parallel_integration_tests[proc_num],
473 process_list[proc_num].returncode,
474 stdout, stderr))
475 if len(process_list) < num_parallel_tests:
476 test_index = CreateTestProcesses(
477 parallel_integration_tests, test_index, process_list,
478 process_done, max_parallel_tests,
479 coverage_controller.data.filename if perform_coverage else None)
480 if len(process_results) < num_parallel_tests:
481 if time.time() - last_log_time > 5:
482 print '%d/%d finished - %d failures' % (
483 len(process_results), num_parallel_tests, num_parallel_failures)
484 if len(process_results) == completed_as_of_last_log:
485 hang_detection_counter += 1
486 else:
487 completed_as_of_last_log = len(process_results)
488 hang_detection_counter = 0
489 if hang_detection_counter > 4:
490 still_running = []
491 for proc_num in xrange(len(process_list)):
492 if not process_done[proc_num]:
493 still_running.append(parallel_integration_tests[proc_num])
494 print 'Still running: %s' % still_running
495 last_log_time = time.time()
496 time.sleep(1)
497 process_run_finish_time = time.time()
498 if num_parallel_failures:
499 for result in process_results:
500 if result[1] != 0:
501 new_stderr = result[3].split('\n')
502 print 'Results for failed test %s:' % result[0]
503 for line in new_stderr:
504 print line
505
506 # TODO: Properly track test skips.
507 print 'Parallel tests complete. Success: %s Fail: %s' % (
508 num_parallel_tests - num_parallel_failures, num_parallel_failures)
509 print (
510 'Ran %d tests in %.3fs (%d sequential in %.3fs, %d parallel in %.3fs)'
511 % (num_parallel_tests + num_sequential_tests,
512 float(process_run_finish_time - sequential_start_time),
513 num_sequential_tests,
514 float(parallel_start_time - sequential_start_time),
515 num_parallel_tests,
516 float(process_run_finish_time - parallel_start_time)))
517 print
518 if not num_parallel_failures and ret.wasSuccessful():
519 print 'OK'
520 else:
521 if num_parallel_failures:
522 print 'FAILED (parallel tests)'
523 if not ret.wasSuccessful():
524 print 'FAILED (sequential tests)'
525 else:
526 total_tests = suite.countTestCases()
527 resultclass = MakeCustomTestResultClass(total_tests)
528
529 runner = unittest.TextTestRunner(verbosity=verbosity,
530 resultclass=resultclass,
531 failfast=failfast)
532 ret = runner.run(suite)
533
534 if perform_coverage:
535 coverage_controller.stop()
536 coverage_controller.combine()
537 coverage_controller.save()
538 print ('Coverage information was saved to: %s' %
539 coverage_controller.data.filename)
540
541 if ret.wasSuccessful() and not num_parallel_failures:
542 ResetFailureCount()
258 return 0 543 return 0
259 return 1 544 return 1
OLDNEW
« no previous file with comments | « gslib/commands/stat.py ('k') | gslib/commands/update.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698