Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(21)

Side by Side Diff: recipe_engine/test.py

Issue 2721613004: simulation_test_ng: initial CL (Closed)
Patch Set: review Created 3 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « recipe_engine/simulation_test.py ('k') | recipes.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 # Copyright 2017 The LUCI Authors. All rights reserved.
2 # Use of this source code is governed under the Apache License, Version 2.0
3 # that can be found in the LICENSE file.
4
5 from __future__ import print_function
6
7 import argparse
8 import cStringIO
9 import contextlib
10 import copy
11 import coverage
12 import datetime
13 import difflib
14 import functools
15 import json
16 import multiprocessing
17 import os
18 import pprint
19 import signal
20 import sys
21 import tempfile
22 import traceback
23
24 from . import checker
25 from . import config_types
26 from . import loader
27 from . import run
28 from . import step_runner
29 from . import stream
30
31
32 # These variables must be set in the dynamic scope of the functions in this
33 # file. We do this instead of passing because they're not picklable, and
34 # that's required by multiprocessing.
35 _UNIVERSE_VIEW = None
36 _ENGINE_FLAGS = None
37
38
39 # An event to signal exit, for example on Ctrl-C.
40 _KILL_SWITCH = multiprocessing.Event()
41
42
class PostProcessError(ValueError):
  """Raised when any of the post-process hooks fails."""
46
47
@contextlib.contextmanager
def coverage_context(include=None):
  """Context manager that records coverage data.

  Args:
    include: optional list of file patterns restricting what is measured.

  Yields:
    The active coverage.coverage object (stopped when the block exits).
  """
  cov = coverage.coverage(config_file=False, include=include)

  # Strict include lists may legitimately match nothing that executes;
  # silence coverage's "no data collected" warning to avoid output spam.
  cov._warn_no_data = False

  cov.start()
  try:
    yield cov
  finally:
    cov.stop()
63
class TestFailure(object):
  """Abstract base for the different kinds of test failures."""

  def format(self):
    """Returns a human-readable description of the failure.

    Subclasses must override this method.
    """
    raise NotImplementedError()
71
class DiffFailure(TestFailure):
  """Failure when simulated recipe commands don't match recorded expectations.
  """

  def __init__(self, diff):
    # Pre-rendered unified diff between expected and actual expectations.
    self.diff = diff

  def format(self):
    """Returns the stored diff text verbatim."""
    return self.diff
82
class CheckFailure(TestFailure):
  """Failure when any of the post-process checks fails."""

  def __init__(self, check):
    # The failed checker object; it knows how to render itself.
    self.check = check

  def format(self):
    """Delegates rendering to the check itself, indented for readability."""
    return self.check.format(indent=4)
92
# TODO(phajdan.jr): Consider namedtuple instead.
class TestResult(object):
  """Outcome of one test: the test itself, its failures, and coverage data."""

  def __init__(self, test, failures, coverage_data):
    self.test = test
    self.failures = failures
    self.coverage_data = coverage_data
100
class Test(object):
  """A single simulation test case for one recipe.

  Holds the recipe name, the per-test data produced by GenTests, the
  directory containing the golden expectation file, and the coverage
  include patterns relevant to this test.
  """

  def __init__(self, recipe_name, test_data, expect_dir, covers):
    self.recipe_name = recipe_name
    self.test_data = test_data
    self.expect_dir = expect_dir
    self.covers = covers

  @property
  def test_name(self):
    # Short name of this test case, as declared in GenTests.
    return self.test_data.name

  @property
  def full_name(self):
    # Unique identifier: "<recipe>.<test>".
    return '%s.%s' % (self.recipe_name, self.test_name)

  @property
  def expectation_path(self):
    """Returns path where serialized expectation data is stored."""
    # TODO(phajdan.jr): move expectation tree outside of the recipe tree.
    return os.path.join(self.expect_dir, self.test_name + '.json')

  def run(self):
    """Runs the test. Returns TestResults object."""
    # TODO(phajdan.jr): handle missing expectation files.
    with open(self.expectation_path) as f:
      # TODO(phajdan.jr): why do we need to re-encode golden data files?
      expected = re_encode(json.load(f))

    actual, failed_checks, coverage_data = self._run_recipe()
    actual = re_encode(actual)

    failures = []

    # TODO(phajdan.jr): handle exception (errors) in the recipe execution.
    # Progress markers: 'C' = check failure, 'F' = diff failure, '.' = pass.
    if failed_checks:
      sys.stdout.write('C')
      failures.extend([CheckFailure(c) for c in failed_checks])
    elif actual != expected:
      diff = '\n'.join(list(difflib.unified_diff(
          pprint.pformat(expected).splitlines(),
          pprint.pformat(actual).splitlines(),
          fromfile='expected', tofile='actual',
          n=4, lineterm='')))

      failures.append(DiffFailure(diff))
      sys.stdout.write('F')
    else:
      sys.stdout.write('.')
    sys.stdout.flush()

    return TestResult(self, failures, coverage_data)

  def _run_recipe(self):
    """Runs the recipe under test in simulation mode.

    Returns a tuple:
      - expectation data
      - failed post-process checks (if any)
      - coverage data
    """
    # Reset global tostring registrations so runs don't leak state between
    # tests executed in the same process.
    config_types.ResetTostringFns()

    annotator = SimulationAnnotatorStreamEngine()
    with stream.StreamEngineInvariants.wrap(annotator) as stream_engine:
      runner = step_runner.SimulationStepRunner(
          stream_engine, self.test_data, annotator)

      props = self.test_data.properties.copy()
      props['recipe'] = self.recipe_name
      engine = run.RecipeEngine(
          runner, props, _UNIVERSE_VIEW, engine_flags=_ENGINE_FLAGS)
      with coverage_context(include=self.covers) as cov:
        # Run recipe loading under coverage context. This ensures we collect
        # coverage of all definitions and globals.
        recipe_script = _UNIVERSE_VIEW.load_recipe(
            self.recipe_name, engine=engine)

        api = loader.create_recipe_api(
            _UNIVERSE_VIEW.universe.package_deps.root_package,
            recipe_script.LOADED_DEPS,
            recipe_script.path, engine, self.test_data)
        result = engine.run(recipe_script, api, self.test_data.properties)

      # NOTE(review): raw_expectations appears to map step name -> step
      # expectation dict (runner.steps_ran plus the final engine result).
      raw_expectations = runner.steps_ran.copy()
      # Don't include tracebacks in expectations because they are too sensitive
      # to change.
      result.result.pop('traceback', None)
      raw_expectations[result.result['name']] = result.result

      failed_checks = []

      for hook, args, kwargs, filename, lineno \
          in self.test_data.post_process_hooks:
        # Each hook gets its own deep copy so one hook's mutations cannot
        # affect the input seen by the next hook's checker.
        input_odict = copy.deepcopy(raw_expectations)
        # We ignore the input_odict so that it never gets printed in full.
        # Usually the check invocation itself will index the input_odict or
        # will use it only for a key membership comparison, which provides
        # enough debugging context.
        checker_obj = checker.Checker(
            filename, lineno, hook, args, kwargs, input_odict)
        rslt = hook(checker_obj, input_odict, *args, **kwargs)
        failed_checks += checker_obj.failed_checks
        if rslt is not None:
          # A hook may return a filtered subset of the expectations; verify
          # it really is a subset before adopting it.
          msg = checker.VerifySubset(rslt, raw_expectations)
          if msg:
            raise PostProcessError('post_process: steps'+msg)
          # restore 'name'
          for k, v in rslt.iteritems():
            if 'name' not in v:
              v['name'] = k
          raw_expectations = rslt

      # empty means drop expectation
      result_data = raw_expectations.values() if raw_expectations else None
      return (result_data, failed_checks, cov.get_data())
iannucci 2017/03/07 19:13:38 pickling the entire result data across processes i
Paweł Hajdan Jr. 2017/03/07 22:53:02 Again, I'd prefer not to worry about premature opt
216
217
218 def get_tests():
219 """Returns a list of tests for current recipe package."""
220 tests = []
221 coverage_data = coverage.CoverageData()
222
223 all_modules = set(_UNIVERSE_VIEW.loop_over_recipe_modules())
224 covered_modules = set()
225
226 base_covers = []
227
228 # Make sure disabling strict coverage also disables our additional check
229 # for module coverage. Note that coverage will still raise an error if
230 # the module is executed by any of the tests, but having less than 100%
231 # coverage.
232 for module in all_modules:
233 # Run module loading under coverage context. This ensures we collect
234 # coverage of all definitions and globals.
235 coverage_include = os.path.join(_UNIVERSE_VIEW.module_dir, '*', '*.py')
236 with coverage_context(include=coverage_include) as cov:
237 mod = _UNIVERSE_VIEW.load_recipe_module(module)
238 coverage_data.update(cov.get_data())
239
240 # Recipe modules can only be covered by tests inside the same module.
241 # To make transition possible for existing code (which will require
242 # writing tests), a temporary escape hatch is added.
243 # TODO(phajdan.jr): remove DISABLE_STRICT_COVERAGE (crbug/693058).
244 if mod.DISABLE_STRICT_COVERAGE:
245 covered_modules.add(module)
246 base_covers.append(os.path.join(
247 _UNIVERSE_VIEW.module_dir, module, '*.py'))
248
249 for recipe_path, recipe_name in _UNIVERSE_VIEW.loop_over_recipes():
250 try:
251 covers = [recipe_path] + base_covers
252
253 # Example/test recipes in a module always cover that module.
254 if ':' in recipe_name:
255 module, _ = recipe_name.split(':', 1)
256 covered_modules.add(module)
257 covers.append(os.path.join(_UNIVERSE_VIEW.module_dir, module, '*.py'))
258
259 with coverage_context(include=covers) as cov:
260 # Run recipe loading under coverage context. This ensures we collect
261 # coverage of all definitions and globals.
262 recipe = _UNIVERSE_VIEW.load_recipe(recipe_name)
263 test_api = loader.create_test_api(recipe.LOADED_DEPS, _UNIVERSE_VIEW)
264
265 root, name = os.path.split(recipe_path)
266 name = os.path.splitext(name)[0]
267 expect_dir = os.path.join(root, '%s.expected' % name)
268
269 # Immediately convert to list to force running the generator under
270 # coverage context. Otherwise coverage would only report executing
271 # the function definition, not GenTests body.
272 recipe_tests = list(recipe.gen_tests(test_api))
273 coverage_data.update(cov.get_data())
274
275 for test_data in recipe_tests:
276 tests.append(Test(recipe_name, test_data, expect_dir, covers))
277 except:
278 info = sys.exc_info()
279 new_exec = Exception('While generating results for %r: %s: %s' % (
280 recipe_name, info[0].__name__, str(info[1])))
281 raise new_exec.__class__, new_exec, info[2]
282
283 uncovered_modules = all_modules.difference(covered_modules)
284 return (tests, coverage_data, uncovered_modules)
285
286
def run_list():
  """Implementation of the 'list' command.

  Prints sorted full names of all discovered tests, one per line.
  """
  all_tests, _unused_coverage, _unused_uncovered = get_tests()
  names = sorted(t.full_name for t in all_tests)
  print('\n'.join(names))
  return 0
292
293
def cover_omit():
  """Returns list of patterns to omit from coverage analysis."""
  patterns = []

  # Recipe module resources are data, not code under test.
  module_dir = _UNIVERSE_VIEW.module_dir
  if os.path.isdir(module_dir):
    patterns.append(os.path.join(module_dir, '*', 'resources', '*'))

  # Exclude recipe engine files from simulation test coverage. Simulation tests
  # should cover "user space" recipe code (recipes and modules), not the engine.
  # The engine is covered by unit tests, not simulation tests.
  engine_dir = os.path.dirname(os.path.abspath(__file__))
  patterns.append(os.path.join(engine_dir, '*'))

  return patterns
308
309
def report_coverage_version():
  """Prints info about coverage module (for debugging)."""
  message = 'Using coverage %s from %r' % (
      coverage.__version__, coverage.__file__)
  print(message)
313
314
def worker(f):
  """Decorator for multiprocessing worker functions.

  Addresses known issues with multiprocessing workers:

  - they can hang on uncaught exceptions
  - we need explicit kill switch to clearly terminate parent

  The wrapped callable returns a (success, payload) pair: payload is the
  function's result on success, otherwise a reason/traceback string.
  """
  @functools.wraps(f)
  def wrapper(*args, **kwargs):
    try:
      if _KILL_SWITCH.is_set():
        # Parent requested shutdown; bail out without running the task.
        return (False, 'kill switch')
      return (True, f(*args, **kwargs))
    except Exception:
      # Report the traceback as data instead of letting the worker hang.
      return (False, traceback.format_exc())
  return wrapper
331
332
@worker
def run_worker(test):
  """Worker for 'run' command (note decorator above).

  Args:
    test: a Test instance to execute in this worker process.

  Returns:
    Via @worker: (True, TestResult) on success, or (False, traceback string)
    if the test raised.
  """
  return test.run()
337
338
def run_run(jobs):
  """Implementation of the 'run' command.

  Args:
    jobs: number of worker processes used to run tests in parallel.

  Returns:
    Exit code: 0 when all tests pass and coverage is 100%, 1 otherwise.
  """
  start_time = datetime.datetime.now()

  report_coverage_version()

  tests, coverage_data, uncovered_modules = get_tests()
  if uncovered_modules:
    raise Exception('The following modules lack test coverage: %s' % (
        ','.join(sorted(uncovered_modules))))

  with kill_switch():
    pool = multiprocessing.Pool(jobs)
    try:
      results = pool.map(run_worker, tests)
    finally:
      # Bug fix: the pool was never shut down, leaking worker processes
      # until interpreter exit.
      pool.close()
      pool.join()

  print()

  rc = 0
  for success, details in results:
    if success:
      assert isinstance(details, TestResult)
      if details.failures:
        rc = 1
        print('%s failed:' % details.test.full_name)
        for failure in details.failures:
          print(failure.format())
      coverage_data.update(details.coverage_data)
    else:
      rc = 1
      print('Internal failure:')
      print(details)

  coverage_file = None
  try:
    # TODO(phajdan.jr): Add API to coverage to load data from memory.
    with tempfile.NamedTemporaryFile(delete=False) as coverage_file:
      coverage_data.write_file(coverage_file.name)

    cov = coverage.coverage(
        data_file=coverage_file.name, config_file=False, omit=cover_omit())
    cov.load()
    outf = cStringIO.StringIO()
    percentage = cov.report(file=outf, show_missing=True, skip_covered=True)
    if int(percentage) != 100:
      rc = 1
      print(outf.getvalue())
  finally:
    # Bug fix: guard against NamedTemporaryFile itself failing, in which
    # case coverage_file would previously be unbound here (NameError).
    if coverage_file is not None:
      os.unlink(coverage_file.name)

  finish_time = datetime.datetime.now()
  print('-' * 70)
  print('Ran %d tests in %0.3fs' % (
      len(tests), (finish_time - start_time).total_seconds()))
  print()
  print('OK' if rc == 0 else 'FAILED')

  return rc
395
396
class SimulationAnnotatorStreamEngine(stream.AnnotatorStreamEngine):
  """Stream engine which just records generated commands."""

  def __init__(self):
    # Maps step name (None for the top-level stream) to its output buffer.
    self._step_buffer_map = {}
    super(SimulationAnnotatorStreamEngine, self).__init__(
        self.step_buffer(None))

  def step_buffer(self, step_name):
    """Returns the buffer for the named step, creating it on first use."""
    try:
      return self._step_buffer_map[step_name]
    except KeyError:
      buf = self._step_buffer_map[step_name] = cStringIO.StringIO()
      return buf

  def new_step_stream(self, step_config):
    """Creates a step stream that writes into the step's private buffer."""
    buf = self.step_buffer(step_config.name)
    return self._create_step_stream(step_config, buf)
411
412
def handle_killswitch(*_):
  """Function invoked by ctrl-c. Signals worker processes to exit.

  Signature matches a signal handler (signum, frame); both are ignored.
  """
  _KILL_SWITCH.set()

  # Reset the signal to DFL so that double ctrl-C kills us for sure.
  signal.signal(signal.SIGINT, signal.SIG_DFL)
  signal.signal(signal.SIGTERM, signal.SIG_DFL)
420
421
@contextlib.contextmanager
def kill_switch():
  """Context manager to handle ctrl-c properly with multiprocessing.

  Installs handle_killswitch for SIGINT and SIGTERM for the duration of
  the managed block. The nested try/finally structure deliberately
  restores handlers in reverse order of installation, and restores SIGINT
  even if installing the SIGTERM handler fails. If the kill switch was
  tripped, exits the process with status 1.
  """
  orig_sigint = signal.signal(signal.SIGINT, handle_killswitch)
  try:
    orig_sigterm = signal.signal(signal.SIGTERM, handle_killswitch)
    try:
      yield
    finally:
      signal.signal(signal.SIGTERM, orig_sigterm)
  finally:
    signal.signal(signal.SIGINT, orig_sigint)

  # Only reached on a clean exit from the block; a tripped switch means
  # the user asked us to stop, so report failure.
  if _KILL_SWITCH.is_set():
    sys.exit(1)
437
438
def re_encode(obj):
  """Ensure consistent encoding for common python data structures.

  Recursively normalizes dicts, lists and strings so all text ends up as
  utf-8 encoded byte strings (Python 2 str), making comparisons against
  golden expectation files stable. Other types pass through unchanged.
  """
  if isinstance(obj, dict):
    return {re_encode(k): re_encode(v) for k, v in obj.iteritems()}
  elif isinstance(obj, list):
    return [re_encode(i) for i in obj]
  elif isinstance(obj, (unicode, str)):
    # Decode byte strings first so the subsequent encode is well-defined;
    # undecodable/unencodable sequences are replaced rather than raising.
    if isinstance(obj, str):
      obj = obj.decode('utf-8', 'replace')
    return obj.encode('utf-8', 'replace')
  else:
    return obj
451
452
def parse_args(args):
  """Returns parsed command line arguments.

  Args:
    args: list of argument strings (excluding the program name).
  """
  parser = argparse.ArgumentParser()
  subparsers = parser.add_subparsers()

  list_parser = subparsers.add_parser(
      'list', description='Print all test names')
  list_parser.set_defaults(func=lambda opts: run_list())

  # TODO(phajdan.jr): support running a subset of tests.
  run_parser = subparsers.add_parser('run', description='Run the tests')
  run_parser.set_defaults(func=lambda opts: run_run(opts.jobs))
  run_parser.add_argument(
      '--jobs', metavar='N', type=int,
      default=multiprocessing.cpu_count(),
      help='run N jobs in parallel (default %(default)s)')

  return parser.parse_args(args)
471
472
def main(universe_view, raw_args, engine_flags):
  """Runs simulation tests on a given repo of recipes.

  Args:
    universe_view: an UniverseView object to operate on
    raw_args: command line arguments to simulation_test_ng
    engine_flags: recipe engine command-line flags
  Returns:
    Exit code
  """
  # These module globals are read by get_tests / Test._run_recipe. They are
  # set here instead of passed around because they are not picklable, which
  # multiprocessing requires of anything sent to workers.
  global _UNIVERSE_VIEW
  _UNIVERSE_VIEW = universe_view
  global _ENGINE_FLAGS
  _ENGINE_FLAGS = engine_flags

  args = parse_args(raw_args)
  # Dispatch to the subcommand handler chosen by argparse (run_list/run_run).
  return args.func(args)
OLDNEW
« no previous file with comments | « recipe_engine/simulation_test.py ('k') | recipes.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698