Chromium Code Reviews| Index: recipe_engine/simulation_test_ng.py |
| diff --git a/recipe_engine/simulation_test_ng.py b/recipe_engine/simulation_test_ng.py |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..74a60adb17f1dc04ecf032656e750e24bef5cd9f |
| --- /dev/null |
| +++ b/recipe_engine/simulation_test_ng.py |
| @@ -0,0 +1,443 @@ |
| +# Copyright 2017 The LUCI Authors. All rights reserved. |
|
iannucci
2017/02/28 23:22:59
let's just make this the top level 'test' command:
Paweł Hajdan Jr.
2017/03/07 19:03:36
Done.
|
| +# Use of this source code is governed under the Apache License, Version 2.0 |
| +# that can be found in the LICENSE file. |
| + |
from __future__ import print_function

import argparse
import collections
import contextlib
import copy
import cStringIO
import difflib
import functools
import json
import multiprocessing
import os
import pprint
import signal
import sys
import tempfile
import traceback

import coverage

from . import checker
from . import config_types
from . import loader
from . import run
from . import step_runner
from . import stream
| + |
| + |
# These variables must be set in the dynamic scope of the functions in this
# file. We do this instead of passing because they're not picklable, and
# that's required by multiprocessing.
_UNIVERSE_VIEW = None  # set by main(); the UniverseView for the repo under test
_ENGINE_FLAGS = None   # set by main(); passed through to run.RecipeEngine


# An event to signal exit, for example on Ctrl-C.
# Checked by worker() in child processes; set by handle_killswitch().
_KILL_SWITCH = multiprocessing.Event()
| + |
| + |
@contextlib.contextmanager
def coverage_context(include=None):
  """Measures code coverage while the managed block runs.

  Args:
    include: optional list of file patterns to restrict measurement to.

  Yields:
    The coverage.coverage object, so callers can harvest data (via
    get_data()) after the block exits.
  """
  cov = coverage.coverage(config_file=False, include=include)
  cov.start()
  try:
    yield cov
  finally:
    # Always stop measurement, even if the body raised.
    cov.stop()
| + |
| + |
class TestFailure(object):
  """Abstract base for a single test failure.

  Subclasses implement format() to render the failure for display.
  """

  def format(self):
    """Returns a human-readable description of the failure."""
    raise NotImplementedError()
| + |
| + |
class DiffFailure(TestFailure):
  """Failure caused by a mismatch between expected and actual expectations."""

  def __init__(self, diff):
    # Pre-rendered unified diff text (expected vs actual).
    self.diff = diff

  def format(self):
    """Returns the stored diff verbatim."""
    return self.diff
| + |
| + |
class CheckFailure(TestFailure):
  """Failure of a post-process check hook."""

  def __init__(self, check):
    # The failed check object; rendering is delegated to it.
    self.check = check

  def format(self):
    """Renders the failed check, indented for readability."""
    return self.check.format(indent=4)
| + |
| + |
| +# TODO(phajdan.jr): Consider namedtuple instead. |
|
iannucci
2017/02/28 23:22:59
let's do these right away so they start as immutable
tandrii(chromium)
2017/03/01 17:36:44
+1 in general
but namedtuples carry inside a lot
|
# Immutable record of a single test's outcome. A namedtuple (rather than a
# mutable class) keeps results value-typed and trivially picklable, which
# matters because TestResult instances are returned from multiprocessing
# workers via pool.map.
#
# Fields:
#   test: the Test instance that was run.
#   failures: list of TestFailure objects; empty when the test passed.
#   coverage_data: coverage data collected while running the test.
TestResult = collections.namedtuple(
    'TestResult', ['test', 'failures', 'coverage_data'])
| + |
| + |
class Test(object):
  """A single simulation test case: one recipe plus one GenTests entry.

  Holds everything needed to run the recipe under simulation and compare the
  produced step expectations against the golden .json file on disk.
  """

  def __init__(self, recipe_name, test_data, expect_dir, covers):
    # Name of the recipe under test (may be '<module>:<example>').
    self.recipe_name = recipe_name
    # Test data for this case -- presumably a recipe_test_api TestData
    # yielded by the recipe's GenTests; confirm against loader.
    self.test_data = test_data
    # Directory holding the golden expectation .json files.
    self.expect_dir = expect_dir
    # Coverage include patterns this test is allowed to cover.
    self.covers = covers

  @property
  def test_name(self):
    # Name of this individual test case within the recipe.
    return self.test_data.name

  @property
  def full_name(self):
    # Globally unique test name: '<recipe>.<test>'.
    return '%s.%s' % (self.recipe_name, self.test_name)

  @property
  def expectation_path(self):
    # Path of the golden file for this case.
    # NOTE(review iannucci): TODO (definitely not in this CL) -- revisit how
    # expectation paths are derived.
    return os.path.join(self.expect_dir, self.test_name + '.json')

  def test(self):
    """Runs the recipe under simulation and compares against the golden file.

    Writes a single progress character to stdout: '.' pass, 'F' expectation
    diff, 'C' post-process check failure.

    Returns:
      A TestResult with this test, any failures, and collected coverage.
    """
    # TODO(phajdan.jr): handle missing expectation files.
    with open(self.expectation_path) as f:
      # TODO(phajdan.jr): why do we need to re-encode golden data files?
      # NOTE(review): likely because json.load yields unicode strings;
      # re-encoding makes them comparable with recipe output -- confirm.
      expected = re_encode(json.load(f))

    actual, failed_checks, coverage_data = self._run_recipe()
    actual = re_encode(actual)

    failures = []

    # TODO(phajdan.jr): handle exception (errors) in the recipe execution.
    if failed_checks:
      sys.stdout.write('C')
      failures.extend([CheckFailure(c) for c in failed_checks])
    elif actual != expected:
      diff = '\n'.join(list(difflib.unified_diff(
          pprint.pformat(expected).splitlines(),
          pprint.pformat(actual).splitlines(),
          fromfile='expected', tofile='actual',
          n=4, lineterm='')))

      failures.append(DiffFailure(diff))
      sys.stdout.write('F')
    else:
      sys.stdout.write('.')
    sys.stdout.flush()

    return TestResult(self, failures, coverage_data)

  def _run_recipe(self):
    """Simulates the recipe and applies post-process hooks.

    Returns:
      (expectations, failed_checks, coverage_data) where expectations is the
      list of step dicts (or None when hooks dropped everything).

    NOTE(review iannucci): consider extracting this as a top-level function
    rather than a method.
    """
    # Reset per-run rendering state in config_types (see ResetTostringFns)
    # so each test starts from a clean slate.
    config_types.ResetTostringFns()

    annotator = SimulationAnnotatorStreamEngine()
    with stream.StreamEngineInvariants.wrap(annotator) as stream_engine:
      runner = step_runner.SimulationStepRunner(
          stream_engine, self.test_data, annotator)

      # Inject the recipe name into the properties the engine sees.
      props = self.test_data.properties.copy()
      props['recipe'] = self.recipe_name
      engine = run.RecipeEngine(
          runner, props, _UNIVERSE_VIEW, engine_flags=_ENGINE_FLAGS)
      with coverage_context(include=self.covers) as cov:
        # Run recipe loading under coverage context. This ensures we collect
        # coverage of all definitions and globals.
        recipe_script = _UNIVERSE_VIEW.load_recipe(self.recipe_name, engine=engine)

        api = loader.create_recipe_api(
            _UNIVERSE_VIEW.universe.package_deps.root_package,
            recipe_script.LOADED_DEPS,
            recipe_script.path, engine, self.test_data)
        # NOTE(review): run() receives the raw test properties, not the
        # 'recipe'-augmented copy given to RecipeEngine -- confirm intended.
        result = engine.run(recipe_script, api, self.test_data.properties)

      raw_expectations = runner.steps_ran.copy()
      # Don't include tracebacks in expectations because they are too sensitive to
      # change.
      result.result.pop('traceback', None)
      raw_expectations[result.result['name']] = result.result

      failed_checks = []

      # Each hook may filter/rewrite the expectations and record check failures.
      for hook, args, kwargs, filename, lineno in self.test_data.post_process_hooks:
        input_odict = copy.deepcopy(raw_expectations)
        # we ignore the input_odict so that it never gets printed in full. Usually
        # the check invocation itself will index the input_odict or will use it only
        # for a key membership comparison, which provides enough debugging context.
        checker_obj = checker.Checker(filename, lineno, hook, args, kwargs, input_odict)
        rslt = hook(checker_obj, input_odict, *args, **kwargs)
        failed_checks += checker_obj.failed_checks
        if rslt is not None:
          # A hook's return value replaces the expectations, but must be a
          # subset of what actually ran.
          msg = checker.VerifySubset(rslt, raw_expectations)
          if msg:
            raise PostProcessError('post_process: steps'+msg)
          # restore 'name'
          for k, v in rslt.iteritems():
            if 'name' not in v:
              v['name'] = k
          raw_expectations = rslt

      # empty means drop expectation
      result_data = raw_expectations.values() if raw_expectations else None
      return (result_data, failed_checks, cov.get_data())
| + |
| + |
def parse_args(args):
  """Parses command line arguments into an argparse namespace.

  Args:
    args: list of argument strings (excluding the program name).

  Returns:
    Namespace with a 'command' attribute ('list' or 'test'); the 'test'
    command additionally carries 'jobs'.
  """
  parser = argparse.ArgumentParser()
  subparsers = parser.add_subparsers()

  # 'list': just print discovered test names.
  list_parser = subparsers.add_parser('list', description='Print all test names')
  list_parser.set_defaults(command='list')

  # 'test': run the full simulation test suite.
  test_parser = subparsers.add_parser('test', description='Run the tests')
  test_parser.set_defaults(command='test')
  test_parser.add_argument(
      '--jobs', metavar='N', type=int,
      default=multiprocessing.cpu_count(),
      help='run N jobs in parallel (default %(default)s)')

  return parser.parse_args(args)
| + |
| + |
def get_tests():
  """Discovers all simulation tests in the recipe universe.

  Loads every recipe module and recipe (collecting import-time coverage along
  the way) and expands each recipe's GenTests generator into Test objects.

  Returns:
    (tests, coverage_data, uncovered_modules): list of Test objects, the
    coverage collected while loading, and the set of recipe modules that no
    test covers.
  """
  tests = []
  coverage_data = coverage.CoverageData()

  all_modules = set(_UNIVERSE_VIEW.loop_over_recipe_modules())
  covered_modules = set()

  base_covers = []

  # Make sure disabling strict coverage also disables our additional check
  # for module coverage. Note that coverage will still raise an error if
  # the module is executed by any of the tests, but having less than 100%
  # coverage.
  for module in all_modules:
    # Run module loading under coverage context. This ensures we collect
    # coverage of all definitions and globals.
    coverage_include = os.path.join(_UNIVERSE_VIEW.module_dir, '*', '*.py')
    with coverage_context(include=coverage_include) as cov:
      mod = _UNIVERSE_VIEW.load_recipe_module(module)
    coverage_data.update(cov.get_data())

    # Recipe modules can only be covered by tests inside the same module.
    # To make transition possible for existing code (which will require
    # writing tests), a temporary escape hatch is added.
    # TODO(phajdan.jr): remove DISABLE_STRICT_COVERAGE (crbug/693058).
    if (getattr(mod, 'DISABLE_STRICT_COVERAGE', False)):
      # NOTE(review iannucci): consider moving this logic into the loader.
      covered_modules.add(module)
      base_covers.append(os.path.join(
          _UNIVERSE_VIEW.module_dir, module, '*.py'))

  for recipe_path, recipe_name in _UNIVERSE_VIEW.loop_over_recipes():
    try:
      covers = [recipe_path] + base_covers

      # Example/test recipes in a module always cover that module.
      if ':' in recipe_name:
        module, _ = recipe_name.split(':', 1)
        covered_modules.add(module)
        covers.append(os.path.join(_UNIVERSE_VIEW.module_dir, module, '*.py'))

      with coverage_context(include=covers) as cov:
        # Run recipe loading under coverage context. This ensures we collect
        # coverage of all definitions and globals.
        recipe = _UNIVERSE_VIEW.load_recipe(recipe_name)
        test_api = loader.create_test_api(recipe.LOADED_DEPS, _UNIVERSE_VIEW)

        root, name = os.path.split(recipe_path)
        name = os.path.splitext(name)[0]
        expect_dir = os.path.join(root, '%s.expected' % name)

        # Immediately convert to list to force running the generator under
        # coverage context. Otherwise coverage would only report executing
        # the function definition, not GenTests body.
        recipe_tests = list(recipe.gen_tests(test_api))
      coverage_data.update(cov.get_data())

      for test_data in recipe_tests:
        tests.append(Test(recipe_name, test_data, expect_dir, covers))
    except:
      # Re-raise with the recipe name attached for easier debugging, keeping
      # the original traceback (Python 2 three-arg raise).
      info = sys.exc_info()
      new_exec = Exception('While generating results for %r: %s: %s' % (
          recipe_name, info[0].__name__, str(info[1])))
      raise new_exec.__class__, new_exec, info[2]

  uncovered_modules = all_modules.difference(covered_modules)
  return (tests, coverage_data, uncovered_modules)
| + |
| + |
def run_list():
  """Prints the full name of every discovered test, one per line.

  Returns:
    0 (exit code); discovery errors propagate as exceptions.
  """
  all_tests = get_tests()[0]
  names = sorted(test.full_name for test in all_tests)
  print('\n'.join(names))
  return 0
| + |
| + |
def worker(f):
  """Wraps a multiprocessing worker function with safety rails.

  This addresses known issues with multiprocessing workers:

  - they can hang on uncaught exceptions
  - we need an explicit kill switch to clearly terminate the parent

  The wrapped function returns (True, result) on success and
  (False, details) on kill-switch abort or exception.
  """
  @functools.wraps(f)
  def _guarded(*args, **kwargs):
    try:
      # Checked inside the try so any failure still yields a (False, ...)
      # tuple instead of crashing the worker.
      if _KILL_SWITCH.is_set():
        return (False, 'kill switch')
      return (True, f(*args, **kwargs))
    except Exception:
      return (False, traceback.format_exc())
  return _guarded
| + |
| + |
@worker
def run_test_worker(test):
  """Multiprocessing entry point: runs one Test, guarded by @worker."""
  return test.test()
| + |
| + |
def cover_omit():
  """Returns a list of file patterns to exclude from coverage reporting."""
  patterns = []

  module_base = _UNIVERSE_VIEW.module_dir
  # Resource files that live under recipe modules are not python under test.
  if os.path.isdir(module_base):
    patterns.append(os.path.join(module_base, '*', 'resources', '*'))

  # Exclude recipe engine files from simulation test coverage. Simulation
  # tests should cover "user space" recipe code (recipes and modules), not
  # the engine. The engine is covered by unit tests, not simulation tests.
  engine_dir = os.path.dirname(os.path.abspath(__file__))
  patterns.append(os.path.join(engine_dir, '*'))

  return patterns
| + |
| + |
def report_coverage_version():
  """Prints the coverage module's version and location, for debugging."""
  print('Using coverage %s from %r' % (coverage.__version__, coverage.__file__))
| + |
| + |
def run_test(jobs):
  """Runs all simulation tests in a pool of parallel worker processes.

  Prints progress and failure details to stdout and enforces 100% coverage
  of recipes and modules.

  Args:
    jobs: number of worker processes to run in parallel.

  Returns:
    Exit code: 0 on success, 1 on any test failure, internal failure, or
    incomplete coverage.

  Raises:
    Exception: if some recipe modules are not covered by any test.
  """
  report_coverage_version()

  # NOTE(review iannucci): discovery can be slow on large repos (like
  # build); consider caching if it becomes a problem.
  tests, coverage_data, uncovered_modules = get_tests()
  if uncovered_modules:
    raise Exception('The following modules lack test coverage: %s' % (
        ','.join(sorted(uncovered_modules))))

  with kill_switch():
    pool = multiprocessing.Pool(jobs)
    try:
      # run_test_worker and Test must be picklable for pool.map; that is
      # why worker state lives in module globals.
      results = pool.map(run_test_worker, tests)
    finally:
      # The original leaked the pool; make sure worker processes are
      # reaped even if map() raises or the kill switch fires.
      pool.close()
      pool.join()

  print()

  rc = 0
  for success, details in results:
    if success:
      assert isinstance(details, TestResult)
      if details.failures:
        rc = 1
        print('%s failed:' % details.test.full_name)
        for failure in details.failures:
          print(failure.format())
      coverage_data.update(details.coverage_data)
    else:
      # The @worker wrapper caught an exception; details is a traceback
      # string (or the 'kill switch' marker).
      rc = 1
      print('Internal failure:')
      print(details)

  # Write combined coverage data to a temp file so the reporting API can
  # load it. mkstemp (instead of NamedTemporaryFile inside the try) avoids
  # referencing an unbound name in the finally clause when temp file
  # creation itself fails.
  fd, coverage_path = tempfile.mkstemp()
  os.close(fd)
  try:
    coverage_data.write_file(coverage_path)

    cov = coverage.coverage(
        data_file=coverage_path, config_file=False, omit=cover_omit())
    cov.load()
    outf = cStringIO.StringIO()
    percentage = cov.report(file=outf, show_missing=True, skip_covered=True)
    # Require 100% coverage; print the detailed report only on failure.
    if int(percentage) != 100:
      rc = 1
      print(outf.getvalue())
  finally:
    os.unlink(coverage_path)

  return rc
| + |
| + |
class SimulationAnnotatorStreamEngine(stream.AnnotatorStreamEngine):
  """Annotator stream engine that captures step output in memory.

  Each step gets its own cStringIO buffer keyed by step name (None holds
  the engine-level output), so simulation output can be inspected later.
  """

  def __init__(self):
    self._buffers = {}
    super(SimulationAnnotatorStreamEngine, self).__init__(
        self.step_buffer(None))

  def step_buffer(self, step_name):
    """Returns (creating on first use) the output buffer for a step."""
    buf = self._buffers.get(step_name)
    if buf is None:
      buf = cStringIO.StringIO()
      self._buffers[step_name] = buf
    return buf

  def new_step_stream(self, step_config):
    return self._create_step_stream(
        step_config, self.step_buffer(step_config.name))
| + |
| + |
class PostProcessError(ValueError):
  """Raised when a post-process hook returns expectations of invalid shape."""
| + |
| + |
def handle_killswitch(*_):
  """Signal handler: flags all workers to exit via the kill switch."""
  _KILL_SWITCH.set()

  # Reset handlers to the OS default so a second Ctrl-C (or TERM) kills
  # the process for sure.
  for sig in (signal.SIGINT, signal.SIGTERM):
    signal.signal(sig, signal.SIG_DFL)
| + |
| + |
@contextlib.contextmanager
def kill_switch():
  """Installs kill-switch signal handlers for the duration of the body.

  Previous handlers are restored on exit. If the kill switch fired during
  the body, the process exits with status 1 (only on normal completion;
  a propagating exception takes precedence).
  """
  installed = []
  try:
    for sig in (signal.SIGINT, signal.SIGTERM):
      installed.append((sig, signal.signal(sig, handle_killswitch)))
    yield
  finally:
    # Restore in reverse installation order; handlers installed before a
    # partial failure are still restored, mirroring the original nested
    # try/finally structure.
    for sig, previous in reversed(installed):
      signal.signal(sig, previous)

  if _KILL_SWITCH.is_set():
    sys.exit(1)
| + |
| + |
def re_encode(obj):
  """Ensure consistent encoding for common python data structures."""
  if isinstance(obj, dict):
    # Recurse into both keys and values.
    return dict((re_encode(key), re_encode(value))
                for key, value in obj.iteritems())
  if isinstance(obj, list):
    return [re_encode(item) for item in obj]
  if isinstance(obj, (unicode, str)):
    # Normalize through unicode so byte strings and unicode strings compare
    # equal after encoding; undecodable bytes are replaced, not fatal.
    text = obj.decode('utf-8', 'replace') if isinstance(obj, str) else obj
    return text.encode('utf-8', 'replace')
  return obj
| + |
| + |
def main(universe_view, raw_args, engine_flags):
  """Runs simulation tests on a given repo of recipes.

  Args:
    universe_view: an UniverseView object to operate on
    raw_args: command line arguments to simulation_test_ng
    engine_flags: recipe engine command-line flags

  Returns:
    Exit code
  """
  # Stash these in module globals: they are not picklable, and
  # multiprocessing workers need access to them (see note at top of file).
  global _UNIVERSE_VIEW, _ENGINE_FLAGS
  _UNIVERSE_VIEW = universe_view
  _ENGINE_FLAGS = engine_flags

  options = parse_args(raw_args)

  if options.command == 'list':
    return run_list()
  if options.command == 'test':
    return run_test(options.jobs)

  print('Unrecognized subcommand %r' % (options.command,), file=sys.stderr)
  return 1