#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs each test case as a single shard, in a single process.

Similar to sharding_supervisor.py but finer grained. Runs multiple instances
in parallel.
"""

import fnmatch
import json
import logging
import optparse
import os
import Queue
import subprocess
import sys
import threading
import time


# These are known to influence the way the output is generated.
KNOWN_GTEST_ENV_VARS = [
  'GTEST_ALSO_RUN_DISABLED_TESTS',
  'GTEST_BREAK_ON_FAILURE',
  'GTEST_CATCH_EXCEPTIONS',
  'GTEST_COLOR',
  'GTEST_FILTER',
  'GTEST_OUTPUT',
  'GTEST_PRINT_TIME',
  'GTEST_RANDOM_SEED',
  'GTEST_REPEAT',
  'GTEST_SHARD_INDEX',
  'GTEST_SHARD_STATUS_FILE',
  'GTEST_SHUFFLE',
  'GTEST_THROW_ON_FAILURE',
  'GTEST_TOTAL_SHARDS',
]

# These need to be popped out of the environment before running a test.
GTEST_ENV_VARS_TO_REMOVE = [
  # TODO(maruel): Handle.
  'GTEST_ALSO_RUN_DISABLED_TESTS',
  'GTEST_FILTER',
  # TODO(maruel): Handle.
  'GTEST_OUTPUT',
  # TODO(maruel): Handle.
  'GTEST_RANDOM_SEED',
  # TODO(maruel): Handle.
  'GTEST_REPEAT',
  'GTEST_SHARD_INDEX',
  # TODO(maruel): Handle.
  'GTEST_SHUFFLE',
  'GTEST_TOTAL_SHARDS',
]


def num_processors():
  """Returns the number of processors.

  Python on OSX 10.6 raises a NotImplementedError exception.
  """
  try:
    # multiprocessing is the preferred way to get the CPU count.
    import multiprocessing
    return multiprocessing.cpu_count()
  except:  # pylint: disable=W0702
    # Fallback for Mac OS 10.6.
    return int(os.sysconf('SC_NPROCESSORS_ONLN'))


if subprocess.mswindows:
  import ctypes  # create_string_buffer, byref and GetLastError live here.
  import msvcrt  # pylint: disable=F0401
  from ctypes import wintypes
  from ctypes import windll

  def ReadFile(handle, desired_bytes):
    """Calls kernel32.ReadFile()."""
    c_read = wintypes.DWORD()
    buff = ctypes.create_string_buffer(desired_bytes+1)
    windll.kernel32.ReadFile(
        handle, buff, desired_bytes, ctypes.byref(c_read), None)
    # NULL terminate it.
    buff[c_read.value] = '\x00'
    return ctypes.GetLastError(), buff.value

  def PeekNamedPipe(handle):
    """Calls kernel32.PeekNamedPipe(). Simplified version."""
    c_avail = wintypes.DWORD()
    c_message = wintypes.DWORD()
    success = windll.kernel32.PeekNamedPipe(
        handle, None, 0, None, ctypes.byref(c_avail),
        ctypes.byref(c_message))
    if not success:
      raise OSError(ctypes.GetLastError())
    return c_avail.value

  def recv_impl(conn, maxsize, timeout):
    """Reads from a pipe without blocking."""
    if timeout:
      start = time.time()
    x = msvcrt.get_osfhandle(conn.fileno())
    try:
      while True:
        avail = min(PeekNamedPipe(x), maxsize)
        if avail:
          return ReadFile(x, avail)[1]
        if not timeout or (time.time() - start) >= timeout:
          return
        # Polling rocks.
        time.sleep(0.001)
    except OSError:
      # Not classy but fits our needs.
      return None

else:
  import fcntl
  import select

  def recv_impl(conn, maxsize, timeout):
    """Reads from a pipe without blocking."""
    if not select.select([conn], [], [], timeout)[0]:
      return None

    # Temporarily make it non-blocking.
    flags = fcntl.fcntl(conn, fcntl.F_GETFL)
    if not conn.closed:
      fcntl.fcntl(conn, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    try:
      return conn.read(maxsize)
    finally:
      if not conn.closed:
        fcntl.fcntl(conn, fcntl.F_SETFL, flags)


class Failure(Exception):
  pass


class Popen(subprocess.Popen):
  """Adds timeout support on stdout and stderr.

  Inspired by
  http://code.activestate.com/recipes/440554-module-to-allow-asynchronous-subprocess-use-on-win/
  """
  def recv(self, maxsize=None, timeout=None):
    """Reads from stdout asynchronously."""
    return self._recv('stdout', maxsize, timeout)

  def recv_err(self, maxsize=None, timeout=None):
    """Reads from stderr asynchronously."""
    return self._recv('stderr', maxsize, timeout)

  def _close(self, which):
    getattr(self, which).close()
    setattr(self, which, None)

  def _recv(self, which, maxsize, timeout):
    conn = getattr(self, which)
    if conn is None:
      return None
    data = recv_impl(conn, max(maxsize or 1024, 1), timeout or 0)
    if not data:
      return self._close(which)
    if self.universal_newlines:
      data = self._translate_newlines(data)
    return data


def call_with_timeout(cmd, timeout, **kwargs):
  """Runs an executable with an optional timeout."""
  proc = Popen(
      cmd,
      stdin=subprocess.PIPE,
      stdout=subprocess.PIPE,
      **kwargs)
  if timeout:
    start = time.time()
    output = ''
    while proc.poll() is None:
      remaining = max(timeout - (time.time() - start), 0.001)
      data = proc.recv(timeout=remaining)
      if data:
        output += data
      if (time.time() - start) >= timeout:
        break
    if (time.time() - start) >= timeout and proc.poll() is None:
      logging.debug('Killing after %.1fs; timeout was %.1fs',
                    time.time() - start, timeout)
      proc.kill()
    proc.wait()
    # Try reading a last time.
    while True:
      data = proc.recv()
      if not data:
        break
      output += data
  else:
    # This code path is much faster.
    output = proc.communicate()[0]
  return output, proc.returncode
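
# Illustrative sketch (not part of the original file): call_with_timeout()
# kills a hung child and still returns whatever it printed so far. The
# command below is a made-up example.
#
#   output, returncode = call_with_timeout(
#       [sys.executable, 'slow_test.py'], timeout=30)
#   if returncode:
#     print 'Failed or timed out; partial output:\n%s' % output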


class QueueWithTimeout(Queue.Queue):
  """Implements timeout support in join()."""

  # QueueWithTimeout.join: Arguments number differs from overridden method
  # pylint: disable=W0221
  def join(self, timeout=None):
    """Returns True if all tasks are finished."""
    if not timeout:
      return Queue.Queue.join(self)
    start = time.time()
    self.all_tasks_done.acquire()
    try:
      while self.unfinished_tasks:
        remaining = start + timeout - time.time()
        if remaining <= 0:
          break
        self.all_tasks_done.wait(remaining)
      return not self.unfinished_tasks
    finally:
      self.all_tasks_done.release()
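
# Illustrative sketch (added for clarity, not in the original): unlike
# Queue.Queue.join(), this join() can be polled, e.g. to refresh a progress
# display while tasks finish:
#
#   q = QueueWithTimeout()
#   while not q.join(timeout=0.1):
#     progress.print_update()  # 'progress' is a hypothetical Progress object.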


class WorkerThread(threading.Thread):
  """Keeps the results of each task in a thread-local outputs variable."""
  def __init__(self, tasks, *args, **kwargs):
    super(WorkerThread, self).__init__(*args, **kwargs)
    self._tasks = tasks
    self.outputs = []
    self.exceptions = []

    self.daemon = True
    self.start()

  def run(self):
    """Runs until a None task is queued."""
    while True:
      task = self._tasks.get()
      if task is None:
        # We're done.
        return
      try:
        func, args, kwargs = task
        self.outputs.append(func(*args, **kwargs))
      except Exception, e:
        logging.error('Caught exception! %s', e)
        self.exceptions.append(sys.exc_info())
      finally:
        self._tasks.task_done()


class ThreadPool(object):
  """Implements a multithreaded worker pool oriented for mapping jobs with
  thread-local result storage.
  """
  def __init__(self, num_threads):
    self._tasks = QueueWithTimeout()
    self._workers = [
        WorkerThread(self._tasks, name='worker-%d' % i)
        for i in range(num_threads)
    ]

  def add_task(self, func, *args, **kwargs):
    """Adds a task, a function to be executed by a worker.

    The function's return value will be stored in the worker's thread-local
    outputs list.
    """
    self._tasks.put((func, args, kwargs))

  def join(self, progress=None, timeout=None):
    """Extracts all the results from each thread, unordered."""
    if progress and timeout:
      while not self._tasks.join(timeout):
        progress.print_update()
      progress.print_update()
    else:
      self._tasks.join()
    out = []
    for w in self._workers:
      if w.exceptions:
        # Re-raise the first exception caught by a worker, with its original
        # traceback.
        raise w.exceptions[0][0], w.exceptions[0][1], w.exceptions[0][2]
      out.extend(w.outputs)
      w.outputs = []
    return out

  def close(self):
    """Closes all the threads."""
    for _ in range(len(self._workers)):
      # Enqueueing None causes the worker to stop.
      self._tasks.put(None)
    for t in self._workers:
      t.join()

  def __enter__(self):
    """Enables 'with' statement."""
    return self

  def __exit__(self, exc_type, exc_value, traceback):
    """Enables 'with' statement."""
    self.close()
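
# Illustrative sketch (added for clarity, not in the original): the pool is
# meant to be used as a context manager; join() gathers the return values of
# every queued function, in no particular order. 'process' and 'items' are
# hypothetical.
#
#   with ThreadPool(num_processors()) as pool:
#     for item in items:
#       pool.add_task(process, item)
#     results = pool.join()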


class Progress(object):
  """Prints progress and accepts updates thread-safely."""
  def __init__(self, size):
    # To be used in the primary thread.
    self.last_printed_line = ''
    self.index = 0
    self.start = time.time()
    self.size = size

    # To be used in all threads.
    self.queued_lines = Queue.Queue()

  def update_item(self, name, index=True, size=False):
    self.queued_lines.put((name, index, size))

  def print_update(self):
    """Prints the current status."""
    while True:
      try:
        name, index, size = self.queued_lines.get_nowait()
      except Queue.Empty:
        break

      if size:
        self.size += 1
      if index:
        self.index += 1
        alignment = str(len(str(self.size)))
        next_line = ('[%' + alignment + 'd/%d] %6.2fs %s') % (
            self.index,
            self.size,
            time.time() - self.start,
            name)
        # Fill it with whitespace.
        # TODO(maruel): Read the console width when possible and trim
        # next_line.
        # TODO(maruel): When stdout is not a console, do not pad with
        # whitespace; use \n instead.
        prefix = '\r' if self.last_printed_line else ''
        line = '%s%s%s' % (
            prefix,
            next_line,
            ' ' * max(0, len(self.last_printed_line) - len(next_line)))
        self.last_printed_line = next_line
      else:
        line = '\n%s\n' % name.strip('\n')
        self.last_printed_line = ''

      sys.stdout.write(line)
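
# Notes added for clarity (not in the original) on the (name, index, size)
# tuples queued by update_item():
# - index=True counts one completed item and redraws the '[i/n]' status line.
# - size=True grows the expected total by one; Runner uses it when a failed
#   test case will be retried, so the denominator stays accurate.
# - index=False prints 'name' raw on its own lines, e.g. a failing test's
#   output.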


def fix_python_path(cmd):
  """Returns the fixed command line to call the right python executable."""
  out = cmd[:]
  if out[0] == 'python':
    out[0] = sys.executable
  elif out[0].endswith('.py'):
    out.insert(0, sys.executable)
  return out
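
# For example (added for clarity, not in the original):
#   fix_python_path(['python', 'foo.py', '--bar'])
#     -> [sys.executable, 'foo.py', '--bar']
#   fix_python_path(['foo.py'])
#     -> [sys.executable, 'foo.py']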


def setup_gtest_env():
  """Copies the environment variables and sets them up for running a gtest."""
  env = os.environ.copy()
  for name in GTEST_ENV_VARS_TO_REMOVE:
    env.pop(name, None)

  # Forcibly enable color by default, if not already disabled.
  env.setdefault('GTEST_COLOR', 'on')

  return env


def gtest_list_tests(cmd):
  """Lists all the test cases for a Google Test executable.

  See more info at http://code.google.com/p/googletest/.
  """
  cmd = cmd[:]
  cmd.append('--gtest_list_tests')
  env = setup_gtest_env()
  try:
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         env=env)
  except OSError, e:
    raise Failure('Failed to run %s\n%s' % (' '.join(cmd), str(e)))
  out, err = p.communicate()
  if p.returncode:
    raise Failure(
        'Failed to run %s\nstdout:\n%s\nstderr:\n%s' %
        (' '.join(cmd), out, err), p.returncode)
  # pylint: disable=E1103
  if err and not err.startswith('Xlib: extension "RANDR" missing on display '):
    logging.error('Unexpected spew in gtest_list_tests:\n%s\n%s', err, cmd)
  return out


def filter_shards(tests, index, shards):
  """Filters the shards.

  Watch out for integer arithmetic.
  """
  # The following code could be made more terse but I liked the extra clarity.
  assert 0 <= index < shards
  total = len(tests)
  quotient, remainder = divmod(total, shards)
  # 1 item of each remainder is distributed over the first 0:remainder shards.
  # For example, with total == 5, index == 1, shards == 3
  # min_bound == 2, max_bound == 4.
  min_bound = quotient * index + min(index, remainder)
  max_bound = quotient * (index + 1) + min(index + 1, remainder)
  return tests[min_bound:max_bound]
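
# Worked example (added for clarity, not in the original): 5 tests over 3
# shards split as 2+2+1, the remainder going to the first shards:
#   filter_shards(['a', 'b', 'c', 'd', 'e'], 0, 3) -> ['a', 'b']
#   filter_shards(['a', 'b', 'c', 'd', 'e'], 1, 3) -> ['c', 'd']
#   filter_shards(['a', 'b', 'c', 'd', 'e'], 2, 3) -> ['e']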


def filter_bad_tests(tests, disabled=False, fails=False, flaky=False):
  """Filters out DISABLED_, FAILS_ or FLAKY_ tests."""
  def starts_with(a, b, prefix):
    return a.startswith(prefix) or b.startswith(prefix)

  def valid(test):
    fixture, case = test.split('.', 1)
    if not disabled and starts_with(fixture, case, 'DISABLED_'):
      return False
    if not fails and starts_with(fixture, case, 'FAILS_'):
      return False
    if not flaky and starts_with(fixture, case, 'FLAKY_'):
      return False
    return True

  return [test for test in tests if valid(test)]
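
# For example (added for clarity, not in the original), with the default
# flags all of these are filtered out, the prefix matching on either side of
# the '.':
#   filter_bad_tests(
#       ['Suite.DISABLED_A', 'FAILS_Suite.B', 'Suite.FLAKY_C', 'Suite.D'])
#     -> ['Suite.D']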


def parse_gtest_cases(out):
  """Returns the flattened list of test cases in the executable.

  The returned list is sorted so it is not dependent on the order of the linked
  objects.

  Expected format is a concatenation of blocks like this one, where the
  fixture name ends with a period:
    TestFixture1.
      TestCase1
      TestCase2
  """
  tests = []
  fixture = None
  lines = out.splitlines()
  while lines:
    line = lines.pop(0)
    if not line:
      break
    if not line.startswith(' '):
      fixture = line
    else:
      case = line[2:]
      if case.startswith('YOU HAVE'):
        # It's a 'YOU HAVE foo bar' line. We're done.
        break
      assert ' ' not in case
      tests.append(fixture + case)
  return sorted(tests)
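
# For example (added for clarity, not in the original), given this
# --gtest_list_tests output:
#
#   Foo.
#     Bar
#     Baz
#   Other.
#     Test
#
# parse_gtest_cases() returns ['Foo.Bar', 'Foo.Baz', 'Other.Test'].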


def list_test_cases(cmd, index, shards, disabled, fails, flaky):
  """Returns the list of test cases according to the specified criteria."""
  tests = parse_gtest_cases(gtest_list_tests(cmd))
  if shards:
    tests = filter_shards(tests, index, shards)
  return filter_bad_tests(tests, disabled, fails, flaky)


class RunSome(object):
  """Thread-safe object deciding if testing should continue."""
  def __init__(self, expected_count, retries, min_failures, max_failure_ratio):
    """Determines if it is better to give up testing after an amount of
    failures and successes.

    Arguments:
    - expected_count is the expected number of elements to run.
    - retries is how many times a failing element can be retried. retries
      should be set to the maximum number of retries per failure. This permits
      dampening the curve to determine the threshold where to stop.
    - min_failures is the minimal number of failures to tolerate, to put a
      lower limit when expected_count is small. This value is multiplied by
      the number of retries.
    - max_failure_ratio is the ratio of permitted failures, e.g. 0.1 to stop
      after 10% of failed test cases.

    For large values of expected_count, the number of tolerated failures will
    be at maximum "(expected_count * retries) * max_failure_ratio".

    For small values of expected_count, the number of tolerated failures will
    be at least "min_failures * retries".
    """
    assert 0 < expected_count
    assert 0 <= retries < 100
    assert 0 <= min_failures
    assert 0. < max_failure_ratio < 1.
    # Constants.
    self._expected_count = expected_count
    self._retries = retries
    self._min_failures = min_failures
    self._max_failure_ratio = max_failure_ratio

    self._min_failures_tolerated = self._min_failures * self._retries
    # Pre-calculate the maximum number of allowable failures. Note that
    # _max_failures can be lower than _min_failures.
    self._max_failures_tolerated = round(
        (expected_count * retries) * max_failure_ratio)

    # Variables.
    self._lock = threading.Lock()
    self._passed = 0
    self._failures = 0

  def should_stop(self):
    """Stops once a threshold was reached. This includes retries."""
    with self._lock:
      # Accept at least the minimum number of failures.
      if self._failures <= self._min_failures_tolerated:
        return False
      return self._failures >= self._max_failures_tolerated

  def got_result(self, passed):
    with self._lock:
      if passed:
        self._passed += 1
      else:
        self._failures += 1

  def __str__(self):
    return '%s(%d, %d, %d, %.3f)' % (
        self.__class__.__name__,
        self._expected_count,
        self._retries,
        self._min_failures,
        self._max_failure_ratio)
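
# Worked example (added for clarity, not in the original): with
# RunSome(expected_count=100, retries=3, min_failures=2,
# max_failure_ratio=0.1), the decider always tolerates 2 * 3 = 6 failures and
# stops once failures reach round(100 * 3 * 0.1) = 30, retries included.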


class RunAll(object):
  """Never fails."""
  @staticmethod
  def should_stop():
    return False

  @staticmethod
  def got_result(_):
    pass


class Runner(object):
  def __init__(self, cmd, cwd_dir, timeout, progress, retry_count, decider):
    # Constants.
    self.cmd = cmd[:]
    self.cwd_dir = cwd_dir
    self.timeout = timeout
    self.progress = progress
    self.retry_count = retry_count
    # It is important to remove the shard environment variables since they
    # could conflict with --gtest_filter.
    self.env = setup_gtest_env()
    self.decider = decider

  def map(self, test_case):
    """Runs a single test case, retrying on failure; returns the results."""
    cmd = self.cmd[:]
    cmd.append('--gtest_filter=%s' % test_case)
    out = []
    for retry in range(self.retry_count):
      if self.decider.should_stop():
        break

      start = time.time()
      output, returncode = call_with_timeout(
          cmd,
          self.timeout,
          cwd=self.cwd_dir,
          stderr=subprocess.STDOUT,
          env=self.env)
      duration = time.time() - start
      data = {
        'test_case': test_case,
        'returncode': returncode,
        'duration': duration,
        # It needs to be valid utf-8 otherwise it can't be stored.
        'output': output.decode('ascii', 'ignore').encode('utf-8'),
      }
      if '[ RUN      ]' not in output:
        # Can't find the gtest marker, mark the run as invalid.
        returncode = returncode or 1
      self.decider.got_result(not bool(returncode))
      out.append(data)
      if sys.platform == 'win32':
        output = output.replace('\r\n', '\n')
      # A failing run that will be retried adds one more expected run.
      size = returncode and retry != self.retry_count - 1
      if retry:
        self.progress.update_item(
            '%s (%.2fs) - retry #%d' % (test_case, duration, retry),
            True,
            size)
      else:
        self.progress.update_item(
            '%s (%.2fs)' % (test_case, duration), True, size)
      if logging.getLogger().isEnabledFor(logging.INFO):
        self.progress.update_item(output, False, False)
      if not returncode:
        break
    else:
      # for..else: reached when the loop was not exited with break, i.e. the
      # test failed on every attempt. Print its output. No need to print it
      # when the logging level is INFO since it was already printed above.
      if not logging.getLogger().isEnabledFor(logging.INFO):
        self.progress.update_item(output, False, False)
    return out


def get_test_cases(cmd, whitelist, blacklist, index, shards):
  """Returns the filtered list of test cases.

  This is done synchronously.
  """
  try:
    tests = list_test_cases(cmd, index, shards, False, False, False)
  except Failure, e:
    print e.args[0]
    return None

  if shards:
    # This is necessary for Swarm log parsing.
    print 'Note: This is test shard %d of %d.' % (index+1, shards)

  # Filters the test cases with the two lists.
  if blacklist:
    tests = [
        t for t in tests if not any(fnmatch.fnmatch(t, s) for s in blacklist)
    ]
  if whitelist:
    tests = [
        t for t in tests if any(fnmatch.fnmatch(t, s) for s in whitelist)
    ]
  logging.info('Found %d test cases in %s', len(tests), ' '.join(cmd))
  return tests
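
# Note added for clarity (not in the original): whitelist and blacklist
# entries are fnmatch patterns matched against the full 'Fixture.Case' name,
# e.g. whitelist=['Foo.*'] keeps every test in the Foo fixture and
# blacklist=['*.FLAKY_*'] skips the flaky-marked cases.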


def LogResults(result_file, results):
  """Writes the results out to a file if one is given."""
  if not result_file:
    return
  with open(result_file, 'wb') as f:
    json.dump(results, f, sort_keys=True, indent=2)


def run_test_cases(cmd, test_cases, jobs, timeout, run_all, result_file):
  """Runs the test cases in parallel and summarizes the results."""
  if not test_cases:
    return 0
  progress = Progress(len(test_cases))
  retries = 3
  if run_all:
    decider = RunAll()
  else:
    # If 10% of test cases fail, just too bad.
    decider = RunSome(len(test_cases), retries, 2, 0.1)
  with ThreadPool(jobs) as pool:
    function = Runner(cmd, os.getcwd(), timeout, progress, retries, decider).map
    for test_case in test_cases:
      pool.add_task(function, test_case)
    results = pool.join(progress, 0.1)
    duration = time.time() - progress.start
  results = dict((item[0]['test_case'], item) for item in results if item)
  LogResults(result_file, results)
  sys.stdout.write('\n')
  total = len(results)
  if not total:
    return 1

  # Classify the results.
  success = []
  flaky = []
  fail = []
  nb_runs = 0
  for test_case in sorted(results):
    items = results[test_case]
    nb_runs += len(items)
    if not any(not i['returncode'] for i in items):
      fail.append(test_case)
    elif len(items) > 1 and any(not i['returncode'] for i in items):
      flaky.append(test_case)
    elif len(items) == 1 and items[0]['returncode'] == 0:
      success.append(test_case)
    else:
      assert False, items

  print 'Summary:'
  for test_case in sorted(flaky):
    items = results[test_case]
    print '%s is flaky (tried %d times)' % (test_case, len(items))

  for test_case in sorted(fail):
    print '%s failed' % (test_case)

  if decider.should_stop():
    print '** STOPPED EARLY due to high failure rate **'
  print 'Success: %4d %5.2f%%' % (len(success), len(success) * 100. / total)
  print 'Flaky:   %4d %5.2f%%' % (len(flaky), len(flaky) * 100. / total)
  print 'Fail:    %4d %5.2f%%' % (len(fail), len(fail) * 100. / total)
  print '%.1fs Done running %d tests with %d executions. %.1f test/s' % (
      duration,
      len(results),
      nb_runs,
      nb_runs / duration)
  return int(bool(fail))


class OptionParserWithLogging(optparse.OptionParser):
  """Adds --verbose option."""
  def __init__(self, verbose=0, **kwargs):
    optparse.OptionParser.__init__(self, **kwargs)
    self.add_option(
        '-v', '--verbose',
        action='count',
        default=verbose,
        help='Use multiple times to increase verbosity')

  def parse_args(self, *args, **kwargs):
    options, args = optparse.OptionParser.parse_args(self, *args, **kwargs)
    levels = [logging.ERROR, logging.INFO, logging.DEBUG]
    logging.basicConfig(
        level=levels[min(len(levels)-1, options.verbose)],
        format='%(levelname)5s %(module)15s(%(lineno)3d): %(message)s')
    return options, args


class OptionParserWithTestSharding(OptionParserWithLogging):
  """Adds automatic handling of test sharding."""
  def __init__(self, **kwargs):
    OptionParserWithLogging.__init__(self, **kwargs)

    def as_digit(variable, default):
      return int(variable) if variable.isdigit() else default

    group = optparse.OptionGroup(self, 'Which shard to run')
    group.add_option(
        '-I', '--index',
        type='int',
        default=as_digit(os.environ.get('GTEST_SHARD_INDEX', ''), None),
        help='Shard index to run')
    group.add_option(
        '-S', '--shards',
        type='int',
        default=as_digit(os.environ.get('GTEST_TOTAL_SHARDS', ''), None),
        help='Total number of shards to calculate from the --index to run')
    self.add_option_group(group)

  def parse_args(self, *args, **kwargs):
    options, args = OptionParserWithLogging.parse_args(self, *args, **kwargs)
    if bool(options.shards) != bool(options.index is not None):
      self.error('Use both --index X --shards Y or none of them')
    return options, args


class OptionParserWithTestShardingAndFiltering(OptionParserWithTestSharding):
  """Adds automatic handling of test sharding and filtering."""
  def __init__(self, *args, **kwargs):
    OptionParserWithTestSharding.__init__(self, *args, **kwargs)

    group = optparse.OptionGroup(self, 'Which test cases to run')
    group.add_option(
        '-w', '--whitelist',
        default=[],
        action='append',
        help='filter to apply to test cases to run, wildcard-style, defaults '
             'to all tests')
    group.add_option(
        '-b', '--blacklist',
        default=[],
        action='append',
        help='filter to apply to test cases to skip, wildcard-style, defaults '
             'to no test')
    group.add_option(
        '-T', '--test-case-file',
        help='File containing the exact list of test cases to run')
    group.add_option(
        '--gtest_filter',
        default=os.environ.get('GTEST_FILTER', ''),
        help='Runs a single test, provided to keep compatibility with '
             'other tools')
    self.add_option_group(group)

  def parse_args(self, *args, **kwargs):
    options, args = OptionParserWithTestSharding.parse_args(
        self, *args, **kwargs)

    if options.gtest_filter:
      # Override any other option.
      # Based on UnitTestOptions::FilterMatchesTest() in
      # http://code.google.com/p/googletest/source/browse/#svn%2Ftrunk%2Fsrc
      if '-' in options.gtest_filter:
        options.whitelist, options.blacklist = options.gtest_filter.split(
            '-', 1)
      else:
        options.whitelist = options.gtest_filter
        options.blacklist = ''
      options.whitelist = [i for i in options.whitelist.split(':') if i]
      options.blacklist = [i for i in options.blacklist.split(':') if i]

    return options, args
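
  # For example (added for clarity, not in the original):
  #   --gtest_filter='Foo.*:Bar.Baz-Foo.Slow'
  # yields whitelist ['Foo.*', 'Bar.Baz'] and blacklist ['Foo.Slow'].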

  @staticmethod
  def process_gtest_options(cmd, options):
    """Grabs the test cases."""
    if options.test_case_file:
      with open(options.test_case_file, 'r') as f:
        return sorted(filter(None, f.read().splitlines()))
    else:
      return get_test_cases(
          cmd,
          options.whitelist,
          options.blacklist,
          options.index,
          options.shards)


class OptionParserTestCases(OptionParserWithTestShardingAndFiltering):
  def __init__(self, *args, **kwargs):
    OptionParserWithTestShardingAndFiltering.__init__(self, *args, **kwargs)
    self.add_option(
        '-j', '--jobs',
        type='int',
        default=num_processors(),
        help='number of parallel jobs; default=%default')
    self.add_option(
        '-t', '--timeout',
        type='int',
        default=120,
        help='Timeout for a single test case, in seconds; default=%default')


def main(argv):
  """CLI frontend to validate arguments."""
  parser = OptionParserTestCases(
      usage='%prog <options> [gtest]',
      verbose=int(os.environ.get('ISOLATE_DEBUG', 0)))
  parser.add_option(
      '--run-all',
      action='store_true',
      default=bool(int(os.environ.get('RUN_TEST_CASES_RUN_ALL', '0'))),
      help='Do not fail early when a large number of test cases fail')
  parser.add_option(
      '--no-dump',
      action='store_true',
      help='do not generate a .run_test_cases file')
  parser.add_option(
      '--result',
      default=os.environ.get('RUN_TEST_CASES_RESULT_FILE', ''),
      help='Override the default name of the generated .run_test_cases file')
  parser.add_option(
      '--gtest_list_tests',
      action='store_true',
      help='List all the test cases unformatted. Keeps compatibility with the '
           'executable itself.')
  options, args = parser.parse_args(argv)

  if not args:
    parser.error(
        'Please provide the executable to run. If you need fancy things like '
        'xvfb, start this script from *inside* xvfb; it\'ll be much faster.')

  cmd = fix_python_path(args)

  if options.gtest_list_tests:
    # Special case, return the output of the target unmodified.
    return subprocess.call(args + ['--gtest_list_tests'])

  test_cases = parser.process_gtest_options(cmd, options)
  if not test_cases:
    # If test_cases is None then there was a problem generating the tests to
    # run, so this should be considered a failure.
    return int(test_cases is None)

  if options.no_dump:
    result_file = None
  else:
    if options.result:
      result_file = options.result
    else:
      result_file = '%s.run_test_cases' % args[-1]

  return run_test_cases(
      cmd,
      test_cases,
      options.jobs,
      options.timeout,
      options.run_all,
      result_file)


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))