Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(280)

Side by Side Diff: recipe_engine/simulation_test_ng.py

Issue 2721613004: simulation_test_ng: initial CL (Closed)
Patch Set: Created 3 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 # Copyright 2017 The LUCI Authors. All rights reserved.
iannucci 2017/02/28 23:22:59 let's just make this the top level 'test' command:
Paweł Hajdan Jr. 2017/03/07 19:03:36 Done.
2 # Use of this source code is governed under the Apache License, Version 2.0
3 # that can be found in the LICENSE file.
4
5 from __future__ import print_function
6
7 import argparse
8 import cStringIO
9 import contextlib
10 import copy
11 import coverage
12 import difflib
13 import functools
14 import json
15 import multiprocessing
16 import os
17 import pprint
18 import signal
19 import sys
20 import tempfile
21 import traceback
22
23 from . import checker
24 from . import config_types
25 from . import loader
26 from . import run
27 from . import step_runner
28 from . import stream
29
30
# These variables must be set in the dynamic scope of the functions in this
# file. We do this instead of passing because they're not picklable, and
# that's required by multiprocessing.
_UNIVERSE_VIEW = None  # set by main(); the UniverseView for the repo under test
_ENGINE_FLAGS = None   # set by main(); forwarded to run.RecipeEngine


# An event to signal exit, for example on Ctrl-C. Set by handle_killswitch()
# and checked by worker() so in-flight jobs wind down quickly.
_KILL_SWITCH = multiprocessing.Event()
40
41
@contextlib.contextmanager
def coverage_context(include=None):
  """Yields a started coverage collector, stopping it on exit.

  Args:
    include: optional list of file patterns to restrict coverage to.
  """
  cov = coverage.coverage(config_file=False, include=include)
  cov.start()
  try:
    yield cov
  finally:
    # Always stop collection, even if the body raised.
    cov.stop()
50
51
class TestFailure(object):
  """Abstract base class for a single test failure.

  Subclasses must implement format() to render a human-readable message.
  """

  def format(self):
    """Returns a human-readable description of the failure."""
    raise NotImplementedError()
55
56
class DiffFailure(TestFailure):
  """Failure caused by a mismatch between expected and actual expectations."""

  def __init__(self, diff):
    # Pre-rendered unified diff text.
    self.diff = diff

  def format(self):
    """Returns the stored diff verbatim."""
    return self.diff
63
64
class CheckFailure(TestFailure):
  """Failure caused by a post-process check that did not pass."""

  def __init__(self, check):
    # The failed checker.Check object; it knows how to render itself.
    self.check = check

  def format(self):
    """Delegates rendering to the check itself."""
    return self.check.format(indent=4)
71
72
# TODO(phajdan.jr): Consider namedtuple instead.
class TestResult(object):
  """Outcome of running a single simulation test."""

  def __init__(self, test, failures, coverage_data):
    self.test = test                    # the Test that was executed
    self.failures = failures            # list of TestFailure objects; empty == pass
    self.coverage_data = coverage_data  # coverage data collected during the run
79
80
class Test(object):
  """A single simulation test case: one GenTests entry for one recipe.

  Instances are shipped to multiprocessing workers (see run_test), so all
  attributes must stay picklable.
  """

  def __init__(self, recipe_name, test_data, expect_dir, covers):
    self.recipe_name = recipe_name  # name of the recipe under test
    self.test_data = test_data      # test data yielded by the recipe's GenTests
    self.expect_dir = expect_dir    # directory holding golden .json files
    self.covers = covers            # coverage include patterns for this test

  @property
  def test_name(self):
    # type: () -> str
    # Name of this case as declared in GenTests.
    return self.test_data.name

  @property
  def full_name(self):
    # type: () -> str
    # Globally unique name: '<recipe>.<test case>'.
    return '%s.%s' % (self.recipe_name, self.test_name)

  @property
  def expectation_path(self):
    # type: () -> str
    # Path to the golden expectation file for this case.
    return os.path.join(self.expect_dir, self.test_name + '.json')

  def test(self):
    """Runs the recipe simulation and compares against expectations.

    Writes a one-character progress marker to stdout ('.' pass, 'F' diff
    mismatch, 'C' failed post-process check) and returns a TestResult.
    """
    # TODO(phajdan.jr): handle missing expectation files.
    with open(self.expectation_path) as f:
      # TODO(phajdan.jr): why do we need to re-encode golden data files?
      expected = re_encode(json.load(f))

    actual, failed_checks, coverage_data = self._run_recipe()
    actual = re_encode(actual)

    failures = []

    # TODO(phajdan.jr): handle exception (errors) in the recipe execution.
    if failed_checks:
      sys.stdout.write('C')
      failures.extend([CheckFailure(c) for c in failed_checks])
    elif actual != expected:
      diff = '\n'.join(list(difflib.unified_diff(
          pprint.pformat(expected).splitlines(),
          pprint.pformat(actual).splitlines(),
          fromfile='expected', tofile='actual',
          n=4, lineterm='')))

      failures.append(DiffFailure(diff))
      sys.stdout.write('F')
    else:
      sys.stdout.write('.')
    sys.stdout.flush()

    return TestResult(self, failures, coverage_data)

  def _run_recipe(self):
    """Simulates the recipe and applies the test's post-process hooks.

    Returns:
      (result_data, failed_checks, coverage_data) where result_data is the
      list of step expectation dicts (None when hooks dropped everything),
      failed_checks is a list of failed checker.Check objects, and
      coverage_data is the coverage collected while loading/running.
    """
    # ToString functions keep global state; reset between tests.
    config_types.ResetTostringFns()

    annotator = SimulationAnnotatorStreamEngine()
    with stream.StreamEngineInvariants.wrap(annotator) as stream_engine:
      runner = step_runner.SimulationStepRunner(
          stream_engine, self.test_data, annotator)

      props = self.test_data.properties.copy()
      props['recipe'] = self.recipe_name
      engine = run.RecipeEngine(
          runner, props, _UNIVERSE_VIEW, engine_flags=_ENGINE_FLAGS)
      with coverage_context(include=self.covers) as cov:
        # Run recipe loading under coverage context. This ensures we collect
        # coverage of all definitions and globals.
        recipe_script = _UNIVERSE_VIEW.load_recipe(self.recipe_name, engine=engine)

        api = loader.create_recipe_api(
            _UNIVERSE_VIEW.universe.package_deps.root_package,
            recipe_script.LOADED_DEPS,
            recipe_script.path, engine, self.test_data)
        # NOTE(review): this passes the raw test_data.properties, not the
        # 'props' dict augmented with 'recipe' above — confirm intentional.
        result = engine.run(recipe_script, api, self.test_data.properties)

      raw_expectations = runner.steps_ran.copy()
      # Don't include tracebacks in expectations because they are too sensitive to
      # change.
      result.result.pop('traceback', None)
      raw_expectations[result.result['name']] = result.result

      failed_checks = []

      for hook, args, kwargs, filename, lineno in self.test_data.post_process_hooks:
        input_odict = copy.deepcopy(raw_expectations)
        # we ignore the input_odict so that it never gets printed in full. Usually
        # the check invocation itself will index the input_odict or will use it only
        # for a key membership comparison, which provides enough debugging context.
        checker_obj = checker.Checker(filename, lineno, hook, args, kwargs, input_odict)
        rslt = hook(checker_obj, input_odict, *args, **kwargs)
        failed_checks += checker_obj.failed_checks
        if rslt is not None:
          # A hook may return a filtered replacement for the expectations;
          # it must be a subset of what actually ran.
          msg = checker.VerifySubset(rslt, raw_expectations)
          if msg:
            raise PostProcessError('post_process: steps'+msg)
          # restore 'name'
          for k, v in rslt.iteritems():
            if 'name' not in v:
              v['name'] = k
          raw_expectations = rslt

      # empty means drop expectation
      result_data = raw_expectations.values() if raw_expectations else None
      return (result_data, failed_checks, cov.get_data())
182
183
def parse_args(args):
  """Parses command-line arguments for the simulation test tool.

  Args:
    args: list of argument strings (excluding the program name).

  Returns:
    An argparse.Namespace with a 'command' attribute ('list' or 'test'),
    plus 'jobs' when the 'test' subcommand was chosen.
  """
  parser = argparse.ArgumentParser()

  subparsers = parser.add_subparsers()

  list_parser = subparsers.add_parser('list', description='Print all test names')
  list_parser.set_defaults(command='list')

  test_parser = subparsers.add_parser('test', description='Run the tests')
  test_parser.set_defaults(command='test')
  test_parser.add_argument(
      '--jobs', metavar='N', type=int,
      default=multiprocessing.cpu_count(),
      help='run N jobs in parallel (default %(default)s)')

  return parser.parse_args(args)
200
201
def get_tests():
  """Discovers all simulation tests, collecting loading-time coverage.

  Returns:
    (tests, coverage_data, uncovered_modules): a list of Test objects, the
    coverage collected while loading modules and generating test cases, and
    the set of recipe modules not covered by any test.
  """
  tests = []
  coverage_data = coverage.CoverageData()

  all_modules = set(_UNIVERSE_VIEW.loop_over_recipe_modules())
  covered_modules = set()

  base_covers = []

  # Make sure disabling strict coverage also disables our additional check
  # for module coverage. Note that coverage will still raise an error if
  # the module is executed by any of the tests, but having less than 100%
  # coverage.
  for module in all_modules:
    # Run module loading under coverage context. This ensures we collect
    # coverage of all definitions and globals.
    coverage_include = os.path.join(_UNIVERSE_VIEW.module_dir, '*', '*.py')
    with coverage_context(include=coverage_include) as cov:
      mod = _UNIVERSE_VIEW.load_recipe_module(module)
    coverage_data.update(cov.get_data())

    # Recipe modules can only be covered by tests inside the same module.
    # To make transition possible for existing code (which will require
    # writing tests), a temporary escape hatch is added.
    # TODO(phajdan.jr): remove DISABLE_STRICT_COVERAGE (crbug/693058).
    if (getattr(mod, 'DISABLE_STRICT_COVERAGE', False)):
      covered_modules.add(module)
      base_covers.append(os.path.join(
          _UNIVERSE_VIEW.module_dir, module, '*.py'))

  for recipe_path, recipe_name in _UNIVERSE_VIEW.loop_over_recipes():
    try:
      covers = [recipe_path] + base_covers

      # Example/test recipes in a module always cover that module.
      if ':' in recipe_name:
        module, _ = recipe_name.split(':', 1)
        covered_modules.add(module)
        covers.append(os.path.join(_UNIVERSE_VIEW.module_dir, module, '*.py'))

      with coverage_context(include=covers) as cov:
        # Run recipe loading under coverage context. This ensures we collect
        # coverage of all definitions and globals.
        recipe = _UNIVERSE_VIEW.load_recipe(recipe_name)
        test_api = loader.create_test_api(recipe.LOADED_DEPS, _UNIVERSE_VIEW)

        root, name = os.path.split(recipe_path)
        name = os.path.splitext(name)[0]
        expect_dir = os.path.join(root, '%s.expected' % name)

        # Immediately convert to list to force running the generator under
        # coverage context. Otherwise coverage would only report executing
        # the function definition, not GenTests body.
        recipe_tests = list(recipe.gen_tests(test_api))
      coverage_data.update(cov.get_data())

      for test_data in recipe_tests:
        tests.append(Test(recipe_name, test_data, expect_dir, covers))
    # NOTE(review): bare 'except' also rewraps KeyboardInterrupt/SystemExit
    # into a plain Exception — consider 'except Exception'.
    except:
      # Re-raise with the recipe name attached, preserving the traceback.
      info = sys.exc_info()
      new_exec = Exception('While generating results for %r: %s: %s' % (
          recipe_name, info[0].__name__, str(info[1])))
      raise new_exec.__class__, new_exec, info[2]

  uncovered_modules = all_modules.difference(covered_modules)
  return (tests, coverage_data, uncovered_modules)
268
269
def run_list():
  """Prints the full name of every simulation test, sorted; returns 0."""
  tests, _coverage_data, _uncovered_modules = get_tests()
  names = sorted(test.full_name for test in tests)
  print('\n'.join(names))
  return 0
274
275
def worker(f):
  """Wrapper for a multiprocessing worker function.

  This addresses known issues with multiprocessing workers:

  - they can hang on uncaught exceptions
  - we need explicit kill switch to clearly terminate parent

  The wrapped function returns (True, result) on success and
  (False, reason) on kill-switch abort or uncaught exception.
  """
  @functools.wraps(f)
  def wrapper(*args, **kwargs):
    try:
      if _KILL_SWITCH.is_set():
        return (False, 'kill switch')
      result = f(*args, **kwargs)
    except Exception:
      # Report the traceback as a string; exceptions themselves may not
      # survive the trip back through multiprocessing.
      return (False, traceback.format_exc())
    return (True, result)
  return wrapper
292
293
@worker
def run_test_worker(test):
  # Multiprocessing entry point: runs one Test in a worker process.
  # The @worker decorator converts the outcome into a (success, value) pair.
  return test.test()
297
298
def cover_omit():
  """Returns file patterns to omit from the coverage report."""
  patterns = []

  mod_dir_base = _UNIVERSE_VIEW.module_dir
  if os.path.isdir(mod_dir_base):
    # Module resource files are not Python code under test.
    patterns.append(os.path.join(mod_dir_base, '*', 'resources', '*'))

  # Exclude recipe engine files from simulation test coverage. Simulation tests
  # should cover "user space" recipe code (recipes and modules), not the engine.
  # The engine is covered by unit tests, not simulation tests.
  engine_dir = os.path.dirname(os.path.abspath(__file__))
  patterns.append(os.path.join(engine_dir, '*'))

  return patterns
312
313
def report_coverage_version():
  """Prints the coverage module version and location, to aid debugging."""
  print('Using coverage %s from %r' % (coverage.__version__, coverage.__file__))
316
317
def run_test(jobs):
  """Runs all simulation tests in parallel and reports coverage.

  Args:
    jobs: number of worker processes to use.

  Returns:
    Exit code: 0 on success; 1 on test failure, internal worker failure,
    or less-than-100% coverage.

  Raises:
    Exception: if any recipe module has no test coverage at all.
  """
  report_coverage_version()

  tests, coverage_data, uncovered_modules = get_tests()
  if uncovered_modules:
    raise Exception('The following modules lack test coverage: %s' % (
        ','.join(sorted(uncovered_modules))))

  with kill_switch():
    pool = multiprocessing.Pool(jobs)
    try:
      results = pool.map(run_test_worker, tests)
    finally:
      # Fix: previously the pool was never shut down, leaking worker
      # processes. close() + join() makes workers exit cleanly.
      pool.close()
      pool.join()

  # Terminate the single-character progress line emitted by the workers.
  print()

  rc = 0
  for success, details in results:
    if success:
      assert isinstance(details, TestResult)
      if details.failures:
        rc = 1
        print('%s failed:' % details.test.full_name)
        for failure in details.failures:
          print(failure.format())
      coverage_data.update(details.coverage_data)
    else:
      # Worker returned (False, reason): kill switch or a traceback string.
      rc = 1
      print('Internal failure:')
      print(details)

  try:
    # Write the merged coverage data to a temp file so a fresh coverage
    # object can load and report on it.
    with tempfile.NamedTemporaryFile(delete=False) as coverage_file:
      coverage_data.write_file(coverage_file.name)

    cov = coverage.coverage(
        data_file=coverage_file.name, config_file=False, omit=cover_omit())
    cov.load()
    outf = cStringIO.StringIO()
    percentage = cov.report(file=outf, show_missing=True, skip_covered=True)
    if int(percentage) != 100:
      rc = 1
      print(outf.getvalue())
  finally:
    os.unlink(coverage_file.name)

  return rc
362
363
class SimulationAnnotatorStreamEngine(stream.AnnotatorStreamEngine):
  """Stream engine that buffers each step's annotator output in memory.

  Output goes into per-step cStringIO buffers (keyed by step name) rather
  than a real output stream, so simulations can capture it.
  """

  def __init__(self):
    # Maps step name (None for the top-level stream) to its output buffer.
    self._step_buffer_map = {}
    super(SimulationAnnotatorStreamEngine, self).__init__(
        self.step_buffer(None))

  def step_buffer(self, step_name):
    # Returns the buffer for step_name, creating it on first use.
    return self._step_buffer_map.setdefault(step_name, cStringIO.StringIO())

  def new_step_stream(self, step_config):
    return self._create_step_stream(step_config,
                                    self.step_buffer(step_config.name))
376
377
class PostProcessError(ValueError):
  """Raised when a post-process hook returns data in an invalid format."""
380
381
def handle_killswitch(*_):
  """Signal handler: trips the global kill switch so workers stop promptly."""
  _KILL_SWITCH.set()

  # Reset the signal to DFL so that double ctrl-C kills us for sure.
  signal.signal(signal.SIGINT, signal.SIG_DFL)
  signal.signal(signal.SIGTERM, signal.SIG_DFL)
388
389
@contextlib.contextmanager
def kill_switch():
  """Installs SIGINT/SIGTERM handlers that trip _KILL_SWITCH for the body.

  Previous handlers are restored on exit (nested try/finally keeps the
  restore order correct even if installing the second handler fails). If
  the kill switch was tripped, exits the process with code 1.
  """
  orig_sigint = signal.signal(signal.SIGINT, handle_killswitch)
  try:
    orig_sigterm = signal.signal(signal.SIGTERM, handle_killswitch)
    try:
      yield
    finally:
      signal.signal(signal.SIGTERM, orig_sigterm)
  finally:
    signal.signal(signal.SIGINT, orig_sigint)

  # Exit immediately with an error code if the run was interrupted.
  if _KILL_SWITCH.is_set():
    sys.exit(1)
404
405
def re_encode(obj):
  """Ensure consistent encoding for common python data structures."""
  if isinstance(obj, dict):
    # Recurse into both keys and values.
    return {re_encode(k): re_encode(v) for k, v in obj.iteritems()}
  if isinstance(obj, list):
    return [re_encode(i) for i in obj]
  if isinstance(obj, (unicode, str)):
    # Normalize all strings to utf-8 byte strings, replacing any
    # undecodable bytes rather than raising.
    text = obj.decode('utf-8', 'replace') if isinstance(obj, str) else obj
    return text.encode('utf-8', 'replace')
  return obj
418
419
def main(universe_view, raw_args, engine_flags):
  """Runs simulation tests on a given repo of recipes.

  Args:
    universe_view: an UniverseView object to operate on
    raw_args: command line arguments to simulation_test_ng
    engine_flags: recipe engine command-line flags
  Returns:
    Exit code
  """
  # Stash these in module globals: they are not picklable, so they cannot
  # be passed through multiprocessing (see note at top of file).
  global _UNIVERSE_VIEW, _ENGINE_FLAGS
  _UNIVERSE_VIEW = universe_view
  _ENGINE_FLAGS = engine_flags

  args = parse_args(raw_args)

  if args.command == 'list':
    return run_list()
  if args.command == 'test':
    return run_test(args.jobs)

  print('Unrecognized subcommand %r' % (args.command,), file=sys.stderr)
  return 1
OLDNEW
« recipe_engine/post_process.py ('K') | « recipe_engine/post_process.py ('k') | recipes.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698