Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(139)

Side by Side Diff: recipe_engine/test.py

Issue 2758923002: Handle unused recipe expectations in new 'test' command (Closed)
Patch Set: fixes Created 3 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | unittests/test_test.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 # Copyright 2017 The LUCI Authors. All rights reserved. 1 # Copyright 2017 The LUCI Authors. All rights reserved.
2 # Use of this source code is governed under the Apache License, Version 2.0 2 # Use of this source code is governed under the Apache License, Version 2.0
3 # that can be found in the LICENSE file. 3 # that can be found in the LICENSE file.
4 4
5 from __future__ import print_function 5 from __future__ import print_function
6 6
7 import argparse 7 import argparse
8 import cStringIO 8 import cStringIO
9 import contextlib 9 import contextlib
10 import copy 10 import copy
11 import coverage 11 import coverage
12 import datetime 12 import datetime
13 import difflib 13 import difflib
14 import functools 14 import functools
15 import json 15 import json
16 import multiprocessing 16 import multiprocessing
17 import os 17 import os
18 import pprint 18 import pprint
19 import re 19 import re
20 import shutil
20 import signal 21 import signal
21 import sys 22 import sys
22 import tempfile 23 import tempfile
23 import traceback 24 import traceback
24 25
25 from . import checker 26 from . import checker
26 from . import config_types 27 from . import config_types
27 from . import loader 28 from . import loader
28 from . import run 29 from . import run
29 from . import step_runner 30 from . import step_runner
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after
  def __init__(self, check):
    """Remembers the failed post-process check so it can be formatted later.

    Args:
      check: failed check object exposing a format(indent=...) method.
    """
    self.check = check
  def format(self):
    """Returns a human-readable, indented description of the failed check."""
    return self.check.format(indent=4)
102 103
class TestResult(object):
  """Result of running a test."""

  def __init__(self, test_description, failures, coverage_data,
               generates_expectation):
    # TestDescription identifying which test produced this result.
    self.test_description = test_description
    # List of failure objects (CheckFailure/DiffFailure); empty means pass.
    self.failures = failures
    # Coverage data collected while simulating this test.
    self.coverage_data = coverage_data
    # True when the test produced expectation data, i.e. it has (or should
    # have) a golden .json file; used to detect unused expectations.
    self.generates_expectation = generates_expectation
111 114
112 class TestDescription(object): 115 class TestDescription(object):
113 """Identifies a specific test. 116 """Identifies a specific test.
114 117
115 Deliberately small and picklable for use with multiprocessing.""" 118 Deliberately small and picklable for use with multiprocessing."""
116 119
117 def __init__(self, recipe_name, test_name, expect_dir, covers): 120 def __init__(self, recipe_name, test_name, expect_dir, covers):
118 self.recipe_name = recipe_name 121 self.recipe_name = recipe_name
119 self.test_name = test_name 122 self.test_name = test_name
120 self.expect_dir = expect_dir 123 self.expect_dir = expect_dir
121 self.covers = covers 124 self.covers = covers
122 125
123 @property 126 @property
124 def full_name(self): 127 def full_name(self):
125 return '%s.%s' % (self.recipe_name, self.test_name) 128 return '%s.%s' % (self.recipe_name, self.test_name)
126 129
130 @property
131 def expectation_path(self):
132 return os.path.join(self.expect_dir, self.test_name + '.json')
133
127 134
def run_test(test_description, train=False):
  """Runs a test. Returns TestResults object.

  Emits a one-character progress marker on stdout:
    '.' pass, 'C' failed post-process check, 'F' expectation diff,
    'D' expectation file (re)written in training mode.

  Args:
    test_description: TestDescription of the test to simulate.
    train: when True, write the actual output as the new golden file
        instead of reporting a diff failure.
  """
  golden_path = test_description.expectation_path
  expected = None
  if os.path.exists(golden_path):
    with open(golden_path) as f:
      # TODO(phajdan.jr): why do we need to re-encode golden data files?
      expected = re_encode(json.load(f))

  actual, failed_checks, coverage_data = run_recipe(
      test_description.recipe_name, test_description.test_name,
      test_description.covers)
  actual = re_encode(actual)

  failures = []

  # TODO(phajdan.jr): handle exception (errors) in the recipe execution.
  if failed_checks:
    sys.stdout.write('C')
    failures.extend(CheckFailure(c) for c in failed_checks)
  elif actual != expected:
    if train:
      # Ensure the expectation directory exists, then (re)write the golden
      # file from the actual simulation output.
      golden_dir = os.path.dirname(golden_path)
      if not os.path.exists(golden_dir):
        os.makedirs(golden_dir)
      with open(golden_path, 'wb') as f:
        json.dump(
            re_encode(actual), f, sort_keys=True, indent=2,
            separators=(',', ': '))
      sys.stdout.write('D')
    else:
      delta = '\n'.join(difflib.unified_diff(
          pprint.pformat(expected).splitlines(),
          pprint.pformat(actual).splitlines(),
          fromfile='expected', tofile='actual',
          n=4, lineterm=''))

      failures.append(DiffFailure(delta))
      sys.stdout.write('F')
  else:
    sys.stdout.write('.')
  sys.stdout.flush()

  # actual is None only when the test yields no expectation data at all.
  return TestResult(test_description, failures, coverage_data,
                    actual is not None)
173 179
174 180
175 def run_recipe(recipe_name, test_name, covers): 181 def run_recipe(recipe_name, test_name, covers):
176 """Runs the recipe under test in simulation mode. 182 """Runs the recipe under test in simulation mode.
177 183
178 Returns a tuple: 184 Returns a tuple:
179 - expectation data 185 - expectation data
180 - failed post-process checks (if any) 186 - failed post-process checks (if any)
181 - coverage data 187 - coverage data
182 """ 188 """
(...skipping 194 matching lines...) Expand 10 before | Expand all | Expand 10 after
377 return (False, traceback.format_exc()) 383 return (False, traceback.format_exc())
378 return wrapper 384 return wrapper
379 385
380 386
@worker
def run_worker(test, train=False):
  """Worker for 'run' command (note decorator above).

  Args:
    test: TestDescription to execute in this worker process.
    train: forwarded to run_test; when True, retrain expectations.
  """
  return run_test(test, train=train)
385 391
386 392
def scan_for_expectations(root):
  """Returns set of expectation paths recursively under |root|.

  Collects every entry named '*.expected' and, when such an entry is a
  directory, also the '*.json' files directly inside it.
  """
  found = set()
  for name in os.listdir(root):
    path = os.path.join(root, name)
    path_is_dir = os.path.isdir(path)
    # Recurse into every subdirectory, expectation-related or not.
    if path_is_dir:
      found.update(scan_for_expectations(path))
    if name.endswith('.expected'):
      found.add(path)
      if path_is_dir:
        # Golden files live directly inside the '.expected' directory.
        for child in os.listdir(path):
          if child.endswith('.json'):
            found.add(os.path.join(path, child))
  return found
410
411
def run_run(train, jobs):
  """Implementation of the 'run' command.

  Simulates all tests (in |jobs| parallel processes), reports failures and
  coverage, and detects expectation files on disk that no test generates.
  In training mode (|train|) stale expectations are deleted instead of
  reported.

  Returns:
    Process exit code: 0 on success, 1 on any failure.
  """
  start_time = datetime.datetime.now()

  report_coverage_version()

  tests, coverage_data, uncovered_modules = get_tests()
  if uncovered_modules:
    raise Exception('The following modules lack test coverage: %s' % (
        ','.join(sorted(uncovered_modules))))

  with kill_switch():
    pool = multiprocessing.Pool(jobs)
    results = pool.map(functools.partial(run_worker, train=train), tests)

  print()

  # Expectation files (and their parent dirs) that some test accounts for.
  used_expectations = set()

  rc = 0
  for success, details in results:
    if success:
      assert isinstance(details, TestResult)
      if details.failures:
        rc = 1
        print('%s failed:' % details.test_description.full_name)
        for failure in details.failures:
          print(failure.format())
      coverage_data.update(details.coverage_data)
      if details.generates_expectation:
        used_expectations.add(details.test_description.expectation_path)
        used_expectations.add(
            os.path.dirname(details.test_description.expectation_path))
    else:
      rc = 1
      print('Internal failure:')
      print(details)

  try:
    # TODO(phajdan.jr): Add API to coverage to load data from memory.
    with tempfile.NamedTemporaryFile(delete=False) as coverage_file:
      coverage_data.write_file(coverage_file.name)

    cov = coverage.coverage(
        data_file=coverage_file.name, config_file=False, omit=cover_omit())
    cov.load()
    outf = cStringIO.StringIO()
    percentage = cov.report(file=outf, show_missing=True, skip_covered=True)
    if int(percentage) != 100:
      rc = 1
      print(outf.getvalue())
      print('FATAL: Insufficient coverage (%.f%%)' % int(percentage))
  finally:
    os.unlink(coverage_file.name)

  # Everything that looks like an expectation on disk.
  actual_expectations = set()
  if os.path.exists(_UNIVERSE_VIEW.recipe_dir):
    actual_expectations.update(scan_for_expectations(_UNIVERSE_VIEW.recipe_dir))
  if os.path.exists(_UNIVERSE_VIEW.module_dir):
    for module_entry in os.listdir(_UNIVERSE_VIEW.module_dir):
      full_module_entry = os.path.join(_UNIVERSE_VIEW.module_dir, module_entry)
      # BUG FIX: the isdir check previously used the bare entry name, which
      # is resolved relative to the current working directory and is
      # virtually never a directory there -- so module expectation dirs were
      # silently skipped. Join with module_dir before testing.
      if os.path.isdir(full_module_entry):
        actual_expectations.update(scan_for_expectations(full_module_entry))

  unused_expectations = actual_expectations.difference(used_expectations)
  if unused_expectations:
    if train:
      # Training mode: delete stale expectation files/directories outright.
      for entry in unused_expectations:
        # A parent '.expected' directory may already have been removed by an
        # earlier iteration; skip entries that no longer exist.
        if not os.path.exists(entry):
          continue
        if os.path.isdir(entry):
          shutil.rmtree(entry)
        else:
          os.unlink(entry)
    else:
      rc = 1
      print('FATAL: unused expectations found:')
      print('\n'.join(sorted(unused_expectations)))

  finish_time = datetime.datetime.now()
  print('-' * 70)
  print('Ran %d tests in %0.3fs' % (
      len(tests), (finish_time - start_time).total_seconds()))
  print()
  print('OK' if rc == 0 else 'FAILED')

  return rc
444 498
445 499
(...skipping 90 matching lines...) Expand 10 before | Expand all | Expand 10 after
536 Returns: 590 Returns:
537 Exit code 591 Exit code
538 """ 592 """
539 global _UNIVERSE_VIEW 593 global _UNIVERSE_VIEW
540 _UNIVERSE_VIEW = universe_view 594 _UNIVERSE_VIEW = universe_view
541 global _ENGINE_FLAGS 595 global _ENGINE_FLAGS
542 _ENGINE_FLAGS = engine_flags 596 _ENGINE_FLAGS = engine_flags
543 597
544 args = parse_args(raw_args) 598 args = parse_args(raw_args)
545 return args.func(args) 599 return args.func(args)
OLDNEW
« no previous file with comments | « no previous file | unittests/test_test.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698