Chromium Code Reviews| OLD | NEW |
|---|---|
| (Empty) | |
| 1 # Copyright 2017 The LUCI Authors. All rights reserved. | |
| 2 # Use of this source code is governed under the Apache License, Version 2.0 | |
| 3 # that can be found in the LICENSE file. | |
| 4 | |
| 5 from __future__ import print_function | |
| 6 | |
| 7 import argparse | |
| 8 import cStringIO | |
| 9 import contextlib | |
| 10 import copy | |
| 11 import coverage | |
| 12 import datetime | |
| 13 import difflib | |
| 14 import functools | |
| 15 import json | |
| 16 import multiprocessing | |
| 17 import os | |
| 18 import pprint | |
| 19 import re | |
| 20 import signal | |
| 21 import sys | |
| 22 import tempfile | |
| 23 import traceback | |
| 24 | |
| 25 from . import checker | |
| 26 from . import config_types | |
| 27 from . import loader | |
| 28 from . import run | |
| 29 from . import step_runner | |
| 30 from . import stream | |
| 31 | |
| 32 | |
# These variables must be set in the dynamic scope of the functions in this
# file. We do this instead of passing because they're not picklable, and
# that's required by multiprocessing. Worker processes inherit the values
# after fork; see main() where they are assigned.
_UNIVERSE_VIEW = None
_ENGINE_FLAGS = None


# An event to signal exit, for example on Ctrl-C. Shared with worker
# processes; set by handle_killswitch() and checked in worker().
_KILL_SWITCH = multiprocessing.Event()


# This maps from (recipe_name,test_name) -> yielded test_data. It's outside of
# run_recipe so that it can persist between RunRecipe calls in the same process.
_GEN_TEST_CACHE = {}


# Allow regex patterns to be 'deep copied' by using them as-is. Compiled
# patterns are treated as atomic (immutable), so sharing the object is safe.
copy._deepcopy_dispatch[re._pattern_type] = copy._deepcopy_atomic
| 51 | |
| 52 | |
class PostProcessError(ValueError):
  """Raised when any of the post-process hooks fails."""
| 56 | |
| 57 | |
@contextlib.contextmanager
def coverage_context(include=None):
  """Context manager that measures coverage while its body runs.

  Args:
    include: optional list of file patterns restricting measurement.

  Yields the active coverage object so callers can harvest its data.
  """
  cov = coverage.coverage(config_file=False, include=include)
  # Strict include lists sometimes match no executed files at all; that is
  # fine, so suppress the resulting "no data collected" warning spam.
  cov._warn_no_data = False
  cov.start()
  try:
    yield cov
  finally:
    cov.stop()
| 72 | |
| 73 | |
class TestFailure(object):
  """Abstract base for the different kinds of test failures.

  Subclasses must implement format().
  """

  def format(self):
    """Returns a human-readable description of the failure."""
    raise NotImplementedError()
| 80 | |
| 81 | |
class DiffFailure(TestFailure):
  """Failure where simulated recipe commands diverge from the recorded
  expectations."""

  def __init__(self, diff):
    # Pre-rendered unified diff text; format() returns it verbatim.
    self.diff = diff

  def format(self):
    return self.diff
| 91 | |
| 92 | |
class CheckFailure(TestFailure):
  """Failure where a post-process check did not pass."""

  def __init__(self, check):
    # The failed check object; it knows how to render itself.
    self.check = check

  def format(self):
    # Delegate rendering to the check, indented for readability.
    return self.check.format(indent=4)
| 101 | |
| 102 | |
class TestResult(object):
  """Outcome of running one simulation test.

  Attributes:
    test_description: the TestDescription that was run.
    failures: list of TestFailure objects (empty on success).
    coverage_data: coverage collected while running the test.
  """

  def __init__(self, test_description, failures, coverage_data):
    self.test_description = test_description
    self.failures = failures
    self.coverage_data = coverage_data
| 110 | |
| 111 | |
class TestDescription(object):
  """Identifies a specific test.

  Deliberately small and picklable for use with multiprocessing.

  Attributes:
    recipe_name: name of the recipe under test.
    test_name: name of the test case within that recipe.
    expect_dir: directory holding the golden expectation files.
    covers: file patterns to include in coverage measurement.
  """

  def __init__(self, recipe_name, test_name, expect_dir, covers):
    self.recipe_name = recipe_name
    self.test_name = test_name
    self.expect_dir = expect_dir
    self.covers = covers

  @property
  def full_name(self):
    """Globally unique name: '<recipe_name>.<test_name>'."""
    return '%s.%s' % (self.recipe_name, self.test_name)
| 126 | |
| 127 | |
def expectation_path(expect_dir, test_name):
  """Returns the path of the JSON file holding a test's golden data."""
  filename = test_name + '.json'
  return os.path.join(expect_dir, filename)
| 131 | |
| 132 | |
def run_test(test_description):
  """Runs a single simulation test and returns a TestResult.

  Also writes a one-character progress marker to stdout:
  '.' for pass, 'F' for expectation diff, 'C' for failed checks.
  """
  golden_path = expectation_path(
      test_description.expect_dir, test_description.test_name)
  expected = None
  if os.path.exists(golden_path):
    with open(golden_path) as f:
      # TODO(phajdan.jr): why do we need to re-encode golden data files?
      expected = re_encode(json.load(f))

  actual, failed_checks, coverage_data = run_recipe(
      test_description.recipe_name, test_description.test_name,
      test_description.covers)
  actual = re_encode(actual)

  failures = []

  # TODO(phajdan.jr): handle exception (errors) in the recipe execution.
  if failed_checks:
    sys.stdout.write('C')
    failures = [CheckFailure(c) for c in failed_checks]
  elif actual != expected:
    diff_lines = difflib.unified_diff(
        pprint.pformat(expected).splitlines(),
        pprint.pformat(actual).splitlines(),
        fromfile='expected', tofile='actual',
        n=4, lineterm='')
    failures = [DiffFailure('\n'.join(diff_lines))]
    sys.stdout.write('F')
  else:
    sys.stdout.write('.')
  sys.stdout.flush()

  return TestResult(test_description, failures, coverage_data)
| 168 | |
| 169 | |
def run_recipe(recipe_name, test_name, covers):
  """Runs the recipe under test in simulation mode.

  Args:
    recipe_name: name of the recipe under test.
    test_name: name of the test case, as yielded by the recipe's GenTests.
    covers: list of file patterns to include in coverage measurement.

  Returns a tuple:
    - expectation data
    - failed post-process checks (if any)
    - coverage data

  Raises:
    PostProcessError: if a post-process hook returns data that is not a
      subset of the raw expectations.
  """
  # Reset global to-string rendering state so runs don't affect each other.
  config_types.ResetTostringFns()

  # Grab test data from the cache. This way it's only generated once.
  test_data = _GEN_TEST_CACHE[(recipe_name, test_name)]

  annotator = SimulationAnnotatorStreamEngine()
  with stream.StreamEngineInvariants.wrap(annotator) as stream_engine:
    runner = step_runner.SimulationStepRunner(
        stream_engine, test_data, annotator)

    props = test_data.properties.copy()
    props['recipe'] = recipe_name
    engine = run.RecipeEngine(
        runner, props, _UNIVERSE_VIEW, engine_flags=_ENGINE_FLAGS)
    with coverage_context(include=covers) as cov:
      # Run recipe loading under coverage context. This ensures we collect
      # coverage of all definitions and globals.
      recipe_script = _UNIVERSE_VIEW.load_recipe(recipe_name, engine=engine)

      api = loader.create_recipe_api(
          _UNIVERSE_VIEW.universe.package_deps.root_package,
          recipe_script.LOADED_DEPS,
          recipe_script.path, engine, test_data)
      result = engine.run(recipe_script, api, test_data.properties)
    # Harvest coverage collected while loading and running the recipe.
    coverage_data = cov.get_data()

    # Steps recorded by the simulation runner, keyed by step name, plus the
    # overall engine result keyed by its own name.
    raw_expectations = runner.steps_ran.copy()
    # Don't include tracebacks in expectations because they are too sensitive
    # to change.
    # TODO(phajdan.jr): Record presence of traceback in expectations.
    result.result.pop('traceback', None)
    raw_expectations[result.result['name']] = result.result

  failed_checks = []

  for hook, args, kwargs, filename, lineno in test_data.post_process_hooks:
    # Deep-copy per hook so one hook's mutations can't leak into the next.
    input_odict = copy.deepcopy(raw_expectations)
    # We ignore the input_odict so that it never gets printed in full.
    # Usually the check invocation itself will index the input_odict or
    # will use it only for a key membership comparison, which provides
    # enough debugging context.
    checker_obj = checker.Checker(
        filename, lineno, hook, args, kwargs, input_odict)

    with coverage_context(include=covers) as cov:
      # Run the hook itself under coverage. There may be custom post-process
      # functions in recipe test code.
      rslt = hook(checker_obj, input_odict, *args, **kwargs)
    coverage_data.update(cov.get_data())

    failed_checks += checker_obj.failed_checks
    if rslt is not None:
      # A hook may return a filtered view of the expectations; verify it is
      # a genuine subset before adopting it as the new expectation data.
      msg = checker.VerifySubset(rslt, raw_expectations)
      if msg:
        raise PostProcessError('post_process: steps'+msg)
      # restore 'name'
      for k, v in rslt.iteritems():
        if 'name' not in v:
          v['name'] = k
      raw_expectations = rslt

  # empty means drop expectation
  result_data = raw_expectations.values() if raw_expectations else None
  return (result_data, failed_checks, coverage_data)
| 242 | |
| 243 | |
def get_tests():
  """Returns a list of tests for current recipe package.

  Returns a tuple:
    - list of TestDescription objects, one per generated test case
    - coverage data collected while loading modules and generating tests
    - set of recipe module names not covered by any test
  """
  tests = []
  coverage_data = coverage.CoverageData()

  all_modules = set(_UNIVERSE_VIEW.loop_over_recipe_modules())
  covered_modules = set()

  base_covers = []

  coverage_include = os.path.join(_UNIVERSE_VIEW.module_dir, '*', '*.py')
  for module in all_modules:
    # Run module loading under coverage context. This ensures we collect
    # coverage of all definitions and globals.
    with coverage_context(include=coverage_include) as cov:
      mod = _UNIVERSE_VIEW.load_recipe_module(module)
    coverage_data.update(cov.get_data())

    # Recipe modules can only be covered by tests inside the same module.
    # To make transition possible for existing code (which will require
    # writing tests), a temporary escape hatch is added.
    # TODO(phajdan.jr): remove DISABLE_STRICT_COVERAGE (crbug/693058).
    if mod.DISABLE_STRICT_COVERAGE:
      covered_modules.add(module)
      # Make sure disabling strict coverage also disables our additional check
      # for module coverage. Note that coverage will still raise an error if
      # the module is executed by any of the tests, but having less than 100%
      # coverage.
      base_covers.append(os.path.join(
          _UNIVERSE_VIEW.module_dir, module, '*.py'))

  for recipe_path, recipe_name in _UNIVERSE_VIEW.loop_over_recipes():
    try:
      covers = [recipe_path] + base_covers

      # Example/test recipes in a module always cover that module.
      if ':' in recipe_name:
        module, _ = recipe_name.split(':', 1)
        covered_modules.add(module)
        covers.append(os.path.join(_UNIVERSE_VIEW.module_dir, module, '*.py'))

      with coverage_context(include=covers) as cov:
        # Run recipe loading under coverage context. This ensures we collect
        # coverage of all definitions and globals.
        recipe = _UNIVERSE_VIEW.load_recipe(recipe_name)
        test_api = loader.create_test_api(recipe.LOADED_DEPS, _UNIVERSE_VIEW)

        root, name = os.path.split(recipe_path)
        name = os.path.splitext(name)[0]
        # TODO(phajdan.jr): move expectation tree outside of the recipe tree.
        expect_dir = os.path.join(root, '%s.expected' % name)

        # Immediately convert to list to force running the generator under
        # coverage context. Otherwise coverage would only report executing
        # the function definition, not GenTests body.
        recipe_tests = list(recipe.gen_tests(test_api))
      coverage_data.update(cov.get_data())

      for test_data in recipe_tests:
        # Put the test data in shared cache. This way it can only be generated
        # once. We do this primarily for _correctness_ , for example in case
        # a weird recipe generates tests non-deterministically. The recipe
        # engine should be robust against such user recipe code where
        # reasonable.
        # NOTE(review): worker processes see this cache because they are
        # forked after it is populated — presumably multiprocessing fork
        # semantics; confirm on platforms that spawn instead of fork.
        _GEN_TEST_CACHE[(recipe_name, test_data.name)] = copy.deepcopy(
            test_data)

        tests.append(TestDescription(
            recipe_name, test_data.name, expect_dir, covers))
    except:
      # Re-raise with the recipe name attached for context, preserving the
      # original traceback (Python 2 three-argument raise form).
      info = sys.exc_info()
      new_exec = Exception('While generating results for %r: %s: %s' % (
          recipe_name, info[0].__name__, str(info[1])))
      raise new_exec.__class__, new_exec, info[2]

  uncovered_modules = all_modules.difference(covered_modules)
  return (tests, coverage_data, uncovered_modules)
| 321 | |
| 322 | |
def run_list(json_file):
  """Implementation of the 'list' command.

  Args:
    json_file: writable file object for JSON output, or None/falsy to
      print the test names to stdout instead.

  Returns the process exit code (always 0).
  """
  tests = get_tests()[0]
  names = sorted(t.full_name for t in tests)
  if json_file:
    json.dump({
        'format': 1,
        'tests': names,
    }, json_file)
  else:
    print('\n'.join(names))
  return 0
| 335 | |
| 336 | |
def cover_omit():
  """Returns list of patterns to omit from coverage analysis."""
  omit = []

  # Skip per-module 'resources' directories, when the module dir exists.
  module_root = _UNIVERSE_VIEW.module_dir
  if os.path.isdir(module_root):
    omit.append(os.path.join(module_root, '*', 'resources', '*'))

  # Exclude recipe engine files from simulation test coverage. Simulation tests
  # should cover "user space" recipe code (recipes and modules), not the engine.
  # The engine is covered by unit tests, not simulation tests.
  engine_dir = os.path.dirname(os.path.abspath(__file__))
  omit.append(os.path.join(engine_dir, '*'))

  return omit
| 351 | |
| 352 | |
def report_coverage_version():
  """Prints the version and location of the coverage module (debug aid)."""
  version, location = coverage.__version__, coverage.__file__
  print('Using coverage %s from %r' % (version, location))
| 356 | |
| 357 | |
def worker(f):
  """Decorator for multiprocessing worker functions.

  This addresses known issues with multiprocessing workers:

  - they can hang on uncaught exceptions
  - we need explicit kill switch to clearly terminate parent

  The wrapped function returns a (success, payload) pair: payload is f's
  return value on success, otherwise an explanatory string.
  """
  @functools.wraps(f)
  def wrapper(*args, **kwargs):
    try:
      if _KILL_SWITCH.is_set():
        # Parent asked everyone to stop; bail out without running f.
        return (False, 'kill switch')
      return (True, f(*args, **kwargs))
    except Exception:
      # Never let an exception escape the worker; report it as a string.
      return (False, traceback.format_exc())
  return wrapper
| 374 | |
| 375 | |
@worker
def run_worker(test):
  """Worker for 'run' command (note decorator above).

  Args:
    test: a picklable TestDescription identifying the test to run.

  Returns (True, TestResult) on success, or (False, error string) via the
  worker decorator.
  """
  return run_test(test)
| 380 | |
| 381 | |
def run_run(jobs):
  """Implementation of the 'run' command.

  Runs all simulation tests in a multiprocessing pool, prints failure
  details and a coverage report, and enforces 100% coverage.

  Args:
    jobs: number of worker processes to run in parallel.

  Returns:
    Exit code: 0 on success, 1 on any test or coverage failure.

  Raises:
    Exception: if any recipe module has no test coverage at all.
  """
  start_time = datetime.datetime.now()

  report_coverage_version()

  tests, coverage_data, uncovered_modules = get_tests()
  if uncovered_modules:
    raise Exception('The following modules lack test coverage: %s' % (
        ','.join(sorted(uncovered_modules))))

  with kill_switch():
    pool = multiprocessing.Pool(jobs)
    try:
      results = pool.map(run_worker, tests)
    finally:
      # Explicitly shut the pool down so worker processes are reaped
      # promptly; previously the pool was never closed or joined.
      pool.close()
      pool.join()

  print()

  rc = 0
  for success, details in results:
    if success:
      # Worker completed; details is the TestResult it produced.
      assert isinstance(details, TestResult)
      if details.failures:
        rc = 1
        print('%s failed:' % details.test_description.full_name)
        for failure in details.failures:
          print(failure.format())
      coverage_data.update(details.coverage_data)
    else:
      # Worker crashed or hit the kill switch; details is an error string.
      rc = 1
      print('Internal failure:')
      print(details)

  try:
    # TODO(phajdan.jr): Add API to coverage to load data from memory.
    with tempfile.NamedTemporaryFile(delete=False) as coverage_file:
      coverage_data.write_file(coverage_file.name)

    cov = coverage.coverage(
        data_file=coverage_file.name, config_file=False, omit=cover_omit())
    cov.load()
    outf = cStringIO.StringIO()
    percentage = cov.report(file=outf, show_missing=True, skip_covered=True)
    if int(percentage) != 100:
      rc = 1
      print(outf.getvalue())
      print('FATAL: Insufficient coverage (%.f%%)' % int(percentage))
  finally:
    os.unlink(coverage_file.name)

  finish_time = datetime.datetime.now()
  print('-' * 70)
  print('Ran %d tests in %0.3fs' % (
      len(tests), (finish_time - start_time).total_seconds()))
  print()
  print('OK' if rc == 0 else 'FAILED')

  return rc
| 439 | |
| 440 | |
class SimulationAnnotatorStreamEngine(stream.AnnotatorStreamEngine):
  """Stream engine which just records generated commands."""

  def __init__(self):
    # Maps step name (None for the top-level stream) to the StringIO buffer
    # collecting that step's annotation output. Must exist before the
    # super().__init__ call below, which immediately asks for a buffer.
    self._step_buffer_map = {}
    super(SimulationAnnotatorStreamEngine, self).__init__(
        self.step_buffer(None))

  def step_buffer(self, step_name):
    # Returns (creating on first use) the output buffer for a step name.
    return self._step_buffer_map.setdefault(step_name, cStringIO.StringIO())

  def new_step_stream(self, step_config):
    # Route each new step's output into its own per-step buffer.
    return self._create_step_stream(step_config,
                                    self.step_buffer(step_config.name))
| 455 | |
| 456 | |
def handle_killswitch(*_):
  """Signal handler (ctrl-c / SIGTERM): tells worker processes to exit."""
  _KILL_SWITCH.set()

  # Restore default handlers so a second ctrl-C kills us for sure.
  signal.signal(signal.SIGINT, signal.SIG_DFL)
  signal.signal(signal.SIGTERM, signal.SIG_DFL)
| 463 signal.signal(signal.SIGTERM, signal.SIG_DFL) | |
| 464 | |
| 465 | |
@contextlib.contextmanager
def kill_switch():
  """Context manager to handle ctrl-c properly with multiprocessing.

  Installs handle_killswitch for SIGINT and SIGTERM around the managed
  block, restores the previous handlers afterwards, and exits the process
  with status 1 if the kill switch was triggered.
  """
  previous_sigint = signal.signal(signal.SIGINT, handle_killswitch)
  try:
    previous_sigterm = signal.signal(signal.SIGTERM, handle_killswitch)
    try:
      yield
    finally:
      signal.signal(signal.SIGTERM, previous_sigterm)
  finally:
    signal.signal(signal.SIGINT, previous_sigint)

  if _KILL_SWITCH.is_set():
    sys.exit(1)
| 481 | |
| 482 | |
# TODO(phajdan.jr): Consider integrating with json.JSONDecoder.
def re_encode(obj):
  """Ensure consistent encoding for common python data structures.

  Recursively converts all strings (unicode or byte) in dicts and lists to
  utf-8 encoded byte strings; everything else passes through unchanged.
  """
  if isinstance(obj, dict):
    return dict(
        (re_encode(key), re_encode(value)) for key, value in obj.iteritems())
  if isinstance(obj, list):
    return [re_encode(item) for item in obj]
  if isinstance(obj, (unicode, str)):
    # Decode byte strings first so both flavors take the same path.
    text = obj.decode('utf-8', 'replace') if isinstance(obj, str) else obj
    return text.encode('utf-8', 'replace')
  return obj
| 496 | |
| 497 | |
def parse_args(args):
  """Returns parsed command line arguments.

  Args:
    args: list of raw command-line arguments (excluding the program name).
  """
  parser = argparse.ArgumentParser()
  subparsers = parser.add_subparsers()

  list_parser = subparsers.add_parser('list', description='Print all test names')
  list_parser.add_argument(
      '--json', metavar='FILE', type=argparse.FileType('w'),
      help='path to JSON output file')
  list_parser.set_defaults(func=lambda opts: run_list(opts.json))

  # TODO(phajdan.jr): support running a subset of tests.
  run_parser = subparsers.add_parser('run', description='Run the tests')
  run_parser.add_argument(
      '--jobs', metavar='N', type=int,
      default=multiprocessing.cpu_count(),
      help='run N jobs in parallel (default %(default)s)')
  run_parser.set_defaults(func=lambda opts: run_run(opts.jobs))

  return parser.parse_args(args)
| 519 | |
| 520 | |
def main(universe_view, raw_args, engine_flags):
  """Runs simulation tests on a given repo of recipes.

  Args:
    universe_view: an UniverseView object to operate on
    raw_args: command line arguments to simulation_test_ng
    engine_flags: recipe engine command-line flags
  Returns:
    Exit code
  """
  # Stash non-picklable state in module globals so multiprocessing worker
  # functions can reach it (see the comment near their definitions).
  global _UNIVERSE_VIEW, _ENGINE_FLAGS
  _UNIVERSE_VIEW = universe_view
  _ENGINE_FLAGS = engine_flags

  parsed = parse_args(raw_args)
  return parsed.func(parsed)
| OLD | NEW |