OLD | NEW |
---|---|
1 # Copyright (C) 2010 Google Inc. All rights reserved. | 1 # Copyright (C) 2010 Google Inc. All rights reserved. |
2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Sze ged | 2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Sze ged |
3 # | 3 # |
4 # Redistribution and use in source and binary forms, with or without | 4 # Redistribution and use in source and binary forms, with or without |
5 # modification, are permitted provided that the following conditions are | 5 # modification, are permitted provided that the following conditions are |
6 # met: | 6 # met: |
7 # | 7 # |
8 # * Redistributions of source code must retain the above copyright | 8 # * Redistributions of source code must retain the above copyright |
9 # notice, this list of conditions and the following disclaimer. | 9 # notice, this list of conditions and the following disclaimer. |
10 # * Redistributions in binary form must reproduce the above | 10 # * Redistributions in binary form must reproduce the above |
(...skipping 23 matching lines...) Expand all Loading... | |
34 create a final report. | 34 create a final report. |
35 """ | 35 """ |
36 | 36 |
37 import datetime | 37 import datetime |
38 import json | 38 import json |
39 import logging | 39 import logging |
40 import random | 40 import random |
41 import sys | 41 import sys |
42 import time | 42 import time |
43 | 43 |
44 from webkitpy.common.checkout.scm.detection import SCMDetector | |
Dirk Pranke
2014/06/17 17:58:58
I think this import is unneeded.
patro
2014/07/15 10:36:56
Done.
| |
44 from webkitpy.common.net.file_uploader import FileUploader | 45 from webkitpy.common.net.file_uploader import FileUploader |
46 from webkitpy.common.system.filesystem import FileSystem | |
Dirk Pranke
2014/06/17 17:58:58
this should be unneeded as well.
patro
2014/07/15 10:36:56
Done.
| |
45 from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinde r | 47 from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinde r |
46 from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunne r | 48 from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunne r |
47 from webkitpy.layout_tests.controllers.test_result_writer import TestResultWrite r | 49 from webkitpy.layout_tests.controllers.test_result_writer import TestResultWrite r |
48 from webkitpy.layout_tests.layout_package import json_results_generator | 50 from webkitpy.layout_tests.layout_package import json_results_generator |
49 from webkitpy.layout_tests.models import test_expectations | 51 from webkitpy.layout_tests.models import test_expectations |
50 from webkitpy.layout_tests.models import test_failures | 52 from webkitpy.layout_tests.models import test_failures |
51 from webkitpy.layout_tests.models import test_run_results | 53 from webkitpy.layout_tests.models import test_run_results |
52 from webkitpy.layout_tests.models.test_input import TestInput | 54 from webkitpy.layout_tests.models.test_input import TestInput |
53 | 55 |
54 _log = logging.getLogger(__name__) | 56 _log = logging.getLogger(__name__) |
(...skipping 15 matching lines...) Expand all Loading... | |
70 Args: | 72 Args: |
71 port: an object implementing port-specific | 73 port: an object implementing port-specific |
72 options: a dictionary of command line options | 74 options: a dictionary of command line options |
73 printer: a Printer object to record updates to. | 75 printer: a Printer object to record updates to. |
74 """ | 76 """ |
75 self._port = port | 77 self._port = port |
76 self._filesystem = port.host.filesystem | 78 self._filesystem = port.host.filesystem |
77 self._options = options | 79 self._options = options |
78 self._printer = printer | 80 self._printer = printer |
79 self._expectations = None | 81 self._expectations = None |
82 # Actions related to archiving of the results. | |
83 self._is_scm_initialized = False | |
84 self._scm = None | |
Dirk Pranke
2014/06/17 17:58:58
These scm fields don't seem to be used?
patro
2014/07/15 10:36:57
Done.
| |
85 self.ARCHIVED_PATH = None | |
86 self.REVISION_INFO = '' | |
80 | 87 |
81 self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR | 88 self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR |
82 self.PERF_SUBDIR = 'perf' | 89 self.PERF_SUBDIR = 'perf' |
83 self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR | 90 self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR |
84 self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests' | 91 self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests' |
85 self._http_server_started = False | 92 self._http_server_started = False |
86 self._websockets_server_started = False | 93 self._websockets_server_started = False |
87 | 94 |
88 self._results_directory = self._port.results_directory() | 95 self._results_directory = self._port.results_directory() |
89 self._finder = LayoutTestFinder(self._port, self._options) | 96 self._finder = LayoutTestFinder(self._port, self._options) |
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
def _test_is_expected_missing(self, test_file):
    """Return True if the test's results are expected to be missing.

    A test counts as "expected missing" when its expectation set contains
    MISSING, NEEDS_REBASELINE, or NEEDS_MANUAL_REBASELINE.
    """
    expected = self._expectations.model().get_expectations(test_file)
    missing_markers = (test_expectations.MISSING,
                       test_expectations.NEEDS_REBASELINE,
                       test_expectations.NEEDS_MANUAL_REBASELINE)
    return any(marker in expected for marker in missing_markers)
147 | 154 |
def _test_is_slow(self, test_file):
    """Return True if the test is marked SLOW in the expectations."""
    expectations_for_test = self._expectations.model().get_expectations(test_file)
    return test_expectations.SLOW in expectations_for_test
150 | 157 |
def needs_servers(self, test_names):
    """Return True if any of the given tests requires the server lock
    (i.e. the HTTP/websocket servers must be running for it)."""
    for test_name in test_names:
        if self._test_requires_lock(test_name):
            return True
    return False
153 | 160 |
161 def _rename_results_folder_if_required(self): | |
162 try: | |
163 timestamp = time.strftime("%d-%m-%Y-%H-%M-%S", time.gmtime(self._fil esystem.mtime(self._filesystem.join(self._results_directory, "results.html")))) | |
164 archived_name = ''.join((self._filesystem.basename(self._results_dir ectory), "_", timestamp)) | |
165 archived_path = self._filesystem.join(self._filesystem.dirname(self. _results_directory), archived_name) | |
166 self._filesystem.move(self._results_directory, archived_path) | |
167 return archived_path | |
Dirk Pranke
2014/06/17 17:58:58
I don't know that we should assume we can write ou
patro
2014/07/15 10:36:57
If the archived results are enabled by default the
| |
168 except OSError, e: | |
169 # It might be possible that results.html was not generated in previo us run, because the test | |
170 # run was interrupted even before testing started. In those cases, d on't archive the folder. | |
171 # Simply override the current folder contents with new results. | |
172 import errno | |
173 if e.errno == errno.EEXIST: | |
174 print "Found no result.html in the folder, ignoring this result and overwritten the folder..." | |
175 _log.info("Found no result.html in the folder, ignoring this res ult and overwritten the folder...") | |
176 return None | |
177 | |
def _set_up_run(self, test_names):
    """Prepare the environment for a test run.

    Checks the build and system dependencies, starts the pixel-test
    helper, optionally clobbers or archives old results, and creates the
    results directory. Returns OK_EXIT_STATUS on success, or the non-zero
    exit code of whichever check failed.
    """
    self._printer.write_update("Checking build ...")
    if self._options.build:
        status = self._port.check_build(self.needs_servers(test_names), self._printer)
        if status:
            _log.error("Build check failed")
            return status

    # The helper must be running before the system-dependency check below,
    # since it may fix up the very environment that the check inspects.
    if self._options.pixel_tests:
        self._printer.write_update("Starting pixel test helper ...")
        self._port.start_helper()

    # Verify system dependencies (themes, fonts, ...) unless disabled.
    if not self._options.nocheck_sys_deps:
        self._printer.write_update("Checking system dependencies ...")
        status = self._port.check_sys_deps(self.needs_servers(test_names))
        if status:
            self._port.stop_helper()
            return status

    if self._options.clobber_old_results:
        self._clobber_old_results()

    # FIXME: The condition should eventually be (archive_prev_results) and
    # (total_archived_results < MAX_ARCHIVE_RESULTS).
    if self._options.enable_versioned_results and self._filesystem.exists(self._results_directory):
        # Revision info to store with the archived dir (currently unset).
        self.REVISION_INFO = ''
        # Move the previous results folder aside for archiving.
        self.ARCHIVED_PATH = self._rename_results_folder_if_required()

    # Make sure the (possibly just-archived-away) output directory exists.
    self._port.host.filesystem.maybe_make_directory(self._results_directory)

    self._port.setup_test_run()
    return test_run_results.OK_EXIT_STATUS
184 | 215 |
185 def run(self, args): | 216 def run(self, args): |
186 """Run the tests and return a RunDetails object with the results.""" | 217 """Run the tests and return a RunDetails object with the results.""" |
187 start_time = time.time() | 218 start_time = time.time() |
188 self._printer.write_update("Collecting tests ...") | 219 self._printer.write_update("Collecting tests ...") |
(...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
242 self._clean_up_run() | 273 self._clean_up_run() |
243 | 274 |
244 # Some crash logs can take a long time to be written out so look | 275 # Some crash logs can take a long time to be written out so look |
245 # for new logs after the test run finishes. | 276 # for new logs after the test run finishes. |
246 self._printer.write_update("looking for new crash logs") | 277 self._printer.write_update("looking for new crash logs") |
247 self._look_for_new_crash_logs(initial_results, start_time) | 278 self._look_for_new_crash_logs(initial_results, start_time) |
248 if retry_results: | 279 if retry_results: |
249 self._look_for_new_crash_logs(retry_results, start_time) | 280 self._look_for_new_crash_logs(retry_results, start_time) |
250 | 281 |
251 _log.debug("summarizing results") | 282 _log.debug("summarizing results") |
252 summarized_full_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retr y) | 283 summarized_full_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retr y, False, self.ARCHIVED_PATH) |
Dirk Pranke
2014/06/17 17:58:57
see comment in test_run_results.
patro
2014/07/15 10:36:57
Done.
| |
253 summarized_failing_results = test_run_results.summarize_results(self._po rt, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_r etry, only_include_failing=True) | 284 summarized_failing_results = test_run_results.summarize_results(self._po rt, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_r etry, only_include_failing=True) |
254 | 285 |
255 exit_code = summarized_failing_results['num_regressions'] | 286 exit_code = summarized_failing_results['num_regressions'] |
256 if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS: | 287 if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS: |
257 _log.warning('num regressions (%d) exceeds max exit status (%d)' % | 288 _log.warning('num regressions (%d) exceeds max exit status (%d)' % |
258 (exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS)) | 289 (exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS)) |
259 exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS | 290 exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS |
260 | 291 |
261 if not self._options.dry_run: | 292 if not self._options.dry_run: |
262 self._write_json_files(summarized_full_results, summarized_failing_r esults, initial_results) | 293 self._write_json_files(summarized_full_results, summarized_failing_r esults, initial_results) |
(...skipping 174 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
437 return int(worker_name.split('/')[1]) if worker_name else -1 | 468 return int(worker_name.split('/')[1]) if worker_name else -1 |
438 | 469 |
439 stats = {} | 470 stats = {} |
440 for result in initial_results.results_by_name.values(): | 471 for result in initial_results.results_by_name.values(): |
441 if result.type != test_expectations.SKIP: | 472 if result.type != test_expectations.SKIP: |
442 stats[result.test_name] = {'results': (_worker_number(result.wor ker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int (result.total_run_time * 1000))} | 473 stats[result.test_name] = {'results': (_worker_number(result.wor ker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int (result.total_run_time * 1000))} |
443 stats_trie = {} | 474 stats_trie = {} |
444 for name, value in stats.iteritems(): | 475 for name, value in stats.iteritems(): |
445 json_results_generator.add_path_to_trie(name, value, stats_trie) | 476 json_results_generator.add_path_to_trie(name, value, stats_trie) |
446 return stats_trie | 477 return stats_trie |
OLD | NEW |