OLD | NEW |
---|---|
1 # Copyright (C) 2010 Google Inc. All rights reserved. | 1 # Copyright (C) 2010 Google Inc. All rights reserved. |
2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged | 2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged |
3 # | 3 # |
4 # Redistribution and use in source and binary forms, with or without | 4 # Redistribution and use in source and binary forms, with or without |
5 # modification, are permitted provided that the following conditions are | 5 # modification, are permitted provided that the following conditions are |
6 # met: | 6 # met: |
7 # | 7 # |
8 # * Redistributions of source code must retain the above copyright | 8 # * Redistributions of source code must retain the above copyright |
9 # notice, this list of conditions and the following disclaimer. | 9 # notice, this list of conditions and the following disclaimer. |
10 # * Redistributions in binary form must reproduce the above | 10 # * Redistributions in binary form must reproduce the above |
(...skipping 20 matching lines...) Expand all Loading... | |
31 The Manager runs a series of tests (TestType interface) against a set | 31 The Manager runs a series of tests (TestType interface) against a set |
32 of test files. If a test file fails a TestType, it returns a list of TestFailure | 32 of test files. If a test file fails a TestType, it returns a list of TestFailure |
33 objects to the Manager. The Manager then aggregates the TestFailures to | 33 objects to the Manager. The Manager then aggregates the TestFailures to |
34 create a final report. | 34 create a final report. |
35 """ | 35 """ |
36 | 36 |
37 import datetime | 37 import datetime |
38 import json | 38 import json |
39 import logging | 39 import logging |
40 import random | 40 import random |
41 import signal | |
41 import sys | 42 import sys |
42 import time | 43 import time |
43 | 44 |
44 from webkitpy.common.net.file_uploader import FileUploader | 45 from webkitpy.common.net.file_uploader import FileUploader |
45 from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder | 46 from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder |
46 from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner | 47 from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner |
47 from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter | 48 from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter |
48 from webkitpy.layout_tests.layout_package import json_results_generator | 49 from webkitpy.layout_tests.layout_package import json_results_generator |
49 from webkitpy.layout_tests.models import test_expectations | 50 from webkitpy.layout_tests.models import test_expectations |
50 from webkitpy.layout_tests.models import test_failures | 51 from webkitpy.layout_tests.models import test_failures |
(...skipping 155 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
206 else: | 207 else: |
207 should_retry_failures = self._options.retry_failures | 208 should_retry_failures = self._options.retry_failures |
208 | 209 |
209 enabled_pixel_tests_in_retry = False | 210 enabled_pixel_tests_in_retry = False |
210 try: | 211 try: |
211 self._start_servers(tests_to_run) | 212 self._start_servers(tests_to_run) |
212 | 213 |
213 initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations, | 214 initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations, |
214 int(self._options.child_processes), retrying=False) | 215 int(self._options.child_processes), retrying=False) |
215 | 216 |
217 # Don't retry failures when interrupted by user or failures limit exception. | |
218 should_retry_failures = should_retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted) | |
219 | |
216 tests_to_retry = self._tests_to_retry(initial_results) | 220 tests_to_retry = self._tests_to_retry(initial_results) |
217 if should_retry_failures and tests_to_retry and not initial_results.interrupted: | 221 if should_retry_failures and tests_to_retry: |
218 enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed() | 222 enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed() |
219 | 223 |
220 _log.info('') | 224 _log.info('') |
221 _log.info("Retrying %d unexpected failure(s) ..." % len(tests_to_retry)) | 225 _log.info("Retrying %d unexpected failure(s) ..." % len(tests_to_retry)) |
222 _log.info('') | 226 _log.info('') |
223 retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1, | 227 retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1, |
224 num_workers=1, retrying=True) | 228 num_workers=1, retrying=True) |
225 | 229 |
226 if enabled_pixel_tests_in_retry: | 230 if enabled_pixel_tests_in_retry: |
227 self._options.pixel_tests = False | 231 self._options.pixel_tests = False |
(...skipping 14 matching lines...) Expand all Loading... | |
242 summarized_full_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry) | 246 summarized_full_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry) |
243 summarized_failing_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, only_include_failing=True) | 247 summarized_failing_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, only_include_failing=True) |
244 | 248 |
245 exit_code = summarized_failing_results['num_regressions'] | 249 exit_code = summarized_failing_results['num_regressions'] |
246 if not self._options.dry_run: | 250 if not self._options.dry_run: |
247 self._write_json_files(summarized_full_results, summarized_failing_results, initial_results) | 251 self._write_json_files(summarized_full_results, summarized_failing_results, initial_results) |
248 self._upload_json_files() | 252 self._upload_json_files() |
249 | 253 |
250 results_path = self._filesystem.join(self._results_directory, "results.html") | 254 results_path = self._filesystem.join(self._results_directory, "results.html") |
251 self._copy_results_html_file(results_path) | 255 self._copy_results_html_file(results_path) |
252 if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)): | 256 if initial_results.keyboard_interrupted: |
253 self._port.show_results_html_file(results_path) | 257 exit_code = signal.SIGINT + 128 |
Dirk Pranke
2013/09/12 21:29:06
Seeing as how this constant is actually needed acr
r.kasibhatla
2013/09/13 04:09:42
Uploaded new patch. As explained in previous comme
| |
254 | 258 else: |
255 self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results) | 259 if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)): |
260 self._port.show_results_html_file(results_path) | |
261 self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results) | |
256 return test_run_results.RunDetails(exit_code, summarized_full_results, summarized_failing_results, initial_results, retry_results, enabled_pixel_tests_in_retry) | 262 return test_run_results.RunDetails(exit_code, summarized_full_results, summarized_failing_results, initial_results, retry_results, enabled_pixel_tests_in_retry) |
257 | 263 |
def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):
    """Expand the test list into per-run TestInputs and dispatch to the runner.

    The whole tests_to_run list is walked `iterations` times, and within each
    pass every test is expanded `repeat_each` times, preserving that nesting
    order. tests_to_skip is forwarded untouched so the runner can account for
    skipped tests; returns whatever LayoutTestRunner.run_tests produces.
    """
    # One TestInput per (iteration, test, repeat) combination, in that order.
    test_inputs = [self._test_input_for_file(test)
                   for _ in xrange(iterations)
                   for test in tests_to_run
                   for _ in xrange(repeat_each)]
    return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, retrying)
(...skipping 149 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
415 return int(worker_name.split('/')[1]) if worker_name else -1 | 421 return int(worker_name.split('/')[1]) if worker_name else -1 |
416 | 422 |
417 stats = {} | 423 stats = {} |
418 for result in initial_results.results_by_name.values(): | 424 for result in initial_results.results_by_name.values(): |
419 if result.type != test_expectations.SKIP: | 425 if result.type != test_expectations.SKIP: |
420 stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))} | 426 stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))} |
421 stats_trie = {} | 427 stats_trie = {} |
422 for name, value in stats.iteritems(): | 428 for name, value in stats.iteritems(): |
423 json_results_generator.add_path_to_trie(name, value, stats_trie) | 429 json_results_generator.add_path_to_trie(name, value, stats_trie) |
424 return stats_trie | 430 return stats_trie |
OLD | NEW |