Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(198)

Side by Side Diff: recipe_engine/test.py

Issue 2758923002: Handle unused recipe expectations in new 'test' command (Closed)
Patch Set: Created 3 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | unittests/test_test.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 # Copyright 2017 The LUCI Authors. All rights reserved. 1 # Copyright 2017 The LUCI Authors. All rights reserved.
2 # Use of this source code is governed under the Apache License, Version 2.0 2 # Use of this source code is governed under the Apache License, Version 2.0
3 # that can be found in the LICENSE file. 3 # that can be found in the LICENSE file.
4 4
5 from __future__ import print_function 5 from __future__ import print_function
6 6
7 import argparse 7 import argparse
8 import cStringIO 8 import cStringIO
9 import contextlib 9 import contextlib
10 import copy 10 import copy
11 import coverage 11 import coverage
12 import datetime 12 import datetime
13 import difflib 13 import difflib
14 import functools 14 import functools
15 import json 15 import json
16 import multiprocessing 16 import multiprocessing
17 import os 17 import os
18 import pprint 18 import pprint
19 import re 19 import re
20 import shutil
20 import signal 21 import signal
21 import sys 22 import sys
22 import tempfile 23 import tempfile
23 import traceback 24 import traceback
24 25
25 from . import checker 26 from . import checker
26 from . import config_types 27 from . import config_types
27 from . import loader 28 from . import loader
28 from . import run 29 from . import run
29 from . import step_runner 30 from . import step_runner
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after
96 def __init__(self, check): 97 def __init__(self, check):
97 self.check = check 98 self.check = check
98 99
99 def format(self): 100 def format(self):
100 return self.check.format(indent=4) 101 return self.check.format(indent=4)
101 102
102 103
class TestResult(object):
  """Outcome of running one simulation test.

  Carries the test's identity, any failures, the coverage data gathered
  while running it, and the raw recipe run result.
  """

  def __init__(self, test_description, failures, coverage_data,
               recipe_run_result):
    # TestDescription for the test that produced this result.
    self.test_description = test_description
    # List of failure objects (empty when the test passed).
    self.failures = failures
    # Coverage data collected during the simulated run.
    self.coverage_data = coverage_data
    # Raw expectation data produced by the recipe run (None if unavailable).
    self.recipe_run_result = recipe_run_result
111 114
112 class TestDescription(object): 115 class TestDescription(object):
113 """Identifies a specific test. 116 """Identifies a specific test.
114 117
115 Deliberately small and picklable for use with multiprocessing.""" 118 Deliberately small and picklable for use with multiprocessing."""
116 119
117 def __init__(self, recipe_name, test_name, expect_dir, covers): 120 def __init__(self, recipe_name, test_name, expect_dir, covers):
118 self.recipe_name = recipe_name 121 self.recipe_name = recipe_name
119 self.test_name = test_name 122 self.test_name = test_name
120 self.expect_dir = expect_dir 123 self.expect_dir = expect_dir
121 self.covers = covers 124 self.covers = covers
122 125
123 @property 126 @property
124 def full_name(self): 127 def full_name(self):
125 return '%s.%s' % (self.recipe_name, self.test_name) 128 return '%s.%s' % (self.recipe_name, self.test_name)
126 129
130 @property
131 def expectation_path(self):
132 return os.path.join(self.expect_dir, self.test_name + '.json')
133
127 134
def run_test(test_description, train=False):
  """Runs a test. Returns TestResult object.

  Args:
    test_description: TestDescription identifying the recipe test to run.
    train: if True, (re)write the golden expectation file instead of
        reporting a diff failure when output differs.

  Writes a one-character progress marker to stdout:
  'C' failed post-process check, 'D' expectation (re)trained,
  'F' diff failure, '.' pass.
  """
  expected = None
  if os.path.exists(test_description.expectation_path):
    with open(test_description.expectation_path) as f:
      # TODO(phajdan.jr): why do we need to re-encode golden data files?
      expected = re_encode(json.load(f))

  actual, failed_checks, coverage_data = run_recipe(
      test_description.recipe_name, test_description.test_name,
      test_description.covers)
  actual = re_encode(actual)

  failures = []

  # TODO(phajdan.jr): handle exception (errors) in the recipe execution.
  if failed_checks:
    sys.stdout.write('C')
    failures.extend([CheckFailure(c) for c in failed_checks])
  elif actual != expected:
    if train:
      expectation_dir = os.path.dirname(test_description.expectation_path)
      if not os.path.exists(expectation_dir):
        os.makedirs(expectation_dir)
      # Open in binary mode so that on Windows '\n' is not translated to
      # '\r\n' in the golden files (requested in code review).
      with open(test_description.expectation_path, 'wb') as f:
        json.dump(
            re_encode(actual), f, sort_keys=True, indent=2,
            separators=(',', ': '))
      sys.stdout.write('D')
    else:
      diff = '\n'.join(difflib.unified_diff(
          pprint.pformat(expected).splitlines(),
          pprint.pformat(actual).splitlines(),
          fromfile='expected', tofile='actual',
          n=4, lineterm=''))

      failures.append(DiffFailure(diff))
      sys.stdout.write('F')
  else:
    sys.stdout.write('.')
  sys.stdout.flush()

  return TestResult(test_description, failures, coverage_data, actual)
174 179
175 def run_recipe(recipe_name, test_name, covers): 180 def run_recipe(recipe_name, test_name, covers):
176 """Runs the recipe under test in simulation mode. 181 """Runs the recipe under test in simulation mode.
177 182
178 Returns a tuple: 183 Returns a tuple:
179 - expectation data 184 - expectation data
180 - failed post-process checks (if any) 185 - failed post-process checks (if any)
181 - coverage data 186 - coverage data
182 """ 187 """
(...skipping 194 matching lines...) Expand 10 before | Expand all | Expand 10 after
377 return (False, traceback.format_exc()) 382 return (False, traceback.format_exc())
378 return wrapper 383 return wrapper
379 384
380 385
@worker
def run_worker(test, train=False):
  """Worker for the 'run' command, executed in a multiprocessing pool child.

  The @worker decorator above converts any exception raised here into a
  (False, traceback) tuple for the parent process.
  """
  return run_test(test, train=train)
386 391
def scan_for_expectations(root):
  """Returns set of expectation paths recursively under |root|.

  An expectation path is either a directory named '*.expected' or a
  '.json' file directly inside such a directory.
  """
  collected_expectations = set()
  for entry in os.listdir(root):
    full_entry = os.path.join(root, entry)
    if os.path.isdir(full_entry):
      collected_expectations.update(scan_for_expectations(full_entry))
    if entry.endswith('.expected'):
      collected_expectations.add(full_entry)
      if os.path.isdir(full_entry):
        for subentry in os.listdir(full_entry):
          # Only golden '.json' files count as expectations; stray files
          # (editor backups, .gitignore, ...) must not be tracked, or the
          # unused-expectation check would try to delete/report them
          # (fix requested in code review).
          if not subentry.endswith('.json'):
            continue
          collected_expectations.add(os.path.join(full_entry, subentry))
  return collected_expectations
406
407
def run_run(train, jobs):
  """Implementation of the 'run' command.

  Args:
    train: if True, (re)write golden files and delete unused expectations
        instead of failing on mismatches.
    jobs: number of worker processes in the multiprocessing pool.

  Returns:
    Exit code: 0 on success, 1 on any test/coverage/expectation failure.
  """
  start_time = datetime.datetime.now()

  report_coverage_version()

  tests, coverage_data, uncovered_modules = get_tests()
  if uncovered_modules:
    raise Exception('The following modules lack test coverage: %s' % (
        ','.join(sorted(uncovered_modules))))

  with kill_switch():
    pool = multiprocessing.Pool(jobs)
    results = pool.map(functools.partial(run_worker, train=train), tests)

  print()

  # Expectation files/dirs referenced by at least one executed test; used
  # below to detect stale expectations left on disk.
  used_expectations = set()

  rc = 0
  for success, details in results:
    if success:
      assert isinstance(details, TestResult)
      if details.failures:
        rc = 1
        print('%s failed:' % details.test_description.full_name)
        for failure in details.failures:
          print(failure.format())
      coverage_data.update(details.coverage_data)
      if details.recipe_run_result is not None:
        used_expectations.add(details.test_description.expectation_path)
        used_expectations.add(
            os.path.dirname(details.test_description.expectation_path))
    else:
      rc = 1
      print('Internal failure:')
      print(details)

  try:
    # TODO(phajdan.jr): Add API to coverage to load data from memory.
    with tempfile.NamedTemporaryFile(delete=False) as coverage_file:
      coverage_data.write_file(coverage_file.name)

    cov = coverage.coverage(
        data_file=coverage_file.name, config_file=False, omit=cover_omit())
    cov.load()
    outf = cStringIO.StringIO()
    percentage = cov.report(file=outf, show_missing=True, skip_covered=True)
    if int(percentage) != 100:
      rc = 1
      print(outf.getvalue())
      print('FATAL: Insufficient coverage (%.f%%)' % int(percentage))
  finally:
    os.unlink(coverage_file.name)

  actual_expectations = set()
  if os.path.exists(_UNIVERSE_VIEW.recipe_dir):
    actual_expectations.update(scan_for_expectations(_UNIVERSE_VIEW.recipe_dir))
  if os.path.exists(_UNIVERSE_VIEW.module_dir):
    for module_entry in os.listdir(_UNIVERSE_VIEW.module_dir):
      module_path = os.path.join(_UNIVERSE_VIEW.module_dir, module_entry)
      # BUG FIX: the original called os.path.isdir(module_entry) on the bare
      # entry name, which resolves against the process CWD rather than
      # module_dir, so module expectation dirs were never scanned.
      if os.path.isdir(module_path):
        actual_expectations.update(scan_for_expectations(module_path))
  unused_expectations = actual_expectations.difference(used_expectations)
  if unused_expectations:
    if train:
      # In training mode, silently delete stale expectations.
      for entry in unused_expectations:
        if not os.path.exists(entry):
          # A parent directory removed earlier in this loop may have
          # already taken this entry with it.
          continue
        if os.path.isdir(entry):
          shutil.rmtree(entry)
        else:
          os.unlink(entry)
    else:
      rc = 1
      print('FATAL: unused expectations found:')
      print('\n'.join(sorted(unused_expectations)))

  finish_time = datetime.datetime.now()
  print('-' * 70)
  print('Ran %d tests in %0.3fs' % (
      len(tests), (finish_time - start_time).total_seconds()))
  print()
  print('OK' if rc == 0 else 'FAILED')

  return rc
444 494
445 495
(...skipping 90 matching lines...) Expand 10 before | Expand all | Expand 10 after
536 Returns: 586 Returns:
537 Exit code 587 Exit code
538 """ 588 """
539 global _UNIVERSE_VIEW 589 global _UNIVERSE_VIEW
540 _UNIVERSE_VIEW = universe_view 590 _UNIVERSE_VIEW = universe_view
541 global _ENGINE_FLAGS 591 global _ENGINE_FLAGS
542 _ENGINE_FLAGS = engine_flags 592 _ENGINE_FLAGS = engine_flags
543 593
544 args = parse_args(raw_args) 594 args = parse_args(raw_args)
545 return args.func(args) 595 return args.func(args)
OLDNEW
« no previous file with comments | « no previous file | unittests/test_test.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698