OLD | NEW |
1 # Copyright (C) 2010 Google Inc. All rights reserved. | 1 # Copyright (C) 2010 Google Inc. All rights reserved. |
2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged | 2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged |
3 # | 3 # |
4 # Redistribution and use in source and binary forms, with or without | 4 # Redistribution and use in source and binary forms, with or without |
5 # modification, are permitted provided that the following conditions are | 5 # modification, are permitted provided that the following conditions are |
6 # met: | 6 # met: |
7 # | 7 # |
8 # * Redistributions of source code must retain the above copyright | 8 # * Redistributions of source code must retain the above copyright |
9 # notice, this list of conditions and the following disclaimer. | 9 # notice, this list of conditions and the following disclaimer. |
10 # * Redistributions in binary form must reproduce the above | 10 # * Redistributions in binary form must reproduce the above |
(...skipping 45 matching lines...)
56 from webkitpy.tool import grammar | 56 from webkitpy.tool import grammar |
57 | 57 |
58 _log = logging.getLogger(__name__) | 58 _log = logging.getLogger(__name__) |
59 | 59 |
60 # Builder base URL where we have the archived test results. | 60 # Builder base URL where we have the archived test results. |
61 BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/" | 61 BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/" |
62 | 62 |
63 TestExpectations = test_expectations.TestExpectations | 63 TestExpectations = test_expectations.TestExpectations |
64 | 64 |
65 | 65 |
66 | |
67 class Manager(object): | 66 class Manager(object): |
68 """A class for managing running a series of layout tests.""" | 67 """A class for managing running a series of layout tests.""" |
69 | 68 |
70 def __init__(self, port, options, printer): | 69 def __init__(self, port, options, printer): |
71 """Initialize test runner data structures. | 70 """Initialize test runner data structures. |
72 | 71 |
73 Args: | 72 Args: |
74 port: An object implementing platform-specific functionality. | 73 port: An object implementing platform-specific functionality. |
75 options: An options argument which contains command line options. | 74 options: An options argument which contains command line options. |
76 printer: A Printer object to record updates to. | 75 printer: A Printer object to record updates to. |
(...skipping 15 matching lines...)
92 self._http_server_started = False | 91 self._http_server_started = False |
93 self._wptserve_started = False | 92 self._wptserve_started = False |
94 self._websockets_server_started = False | 93 self._websockets_server_started = False |
95 | 94 |
96 self._results_directory = self._port.results_directory() | 95 self._results_directory = self._port.results_directory() |
97 self._finder = LayoutTestFinder(self._port, self._options) | 96 self._finder = LayoutTestFinder(self._port, self._options) |
98 self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow) | 97 self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow) |
99 | 98 |
100 def _collect_tests(self, args): | 99 def _collect_tests(self, args): |
101 return self._finder.find_tests(args, test_list=self._options.test_list, | 100 return self._finder.find_tests(args, test_list=self._options.test_list, |
102 fastest_percentile=self._options.fastest) | 101 fastest_percentile=self._options.fastest) |
103 | 102 |
104 def _is_http_test(self, test): | 103 def _is_http_test(self, test): |
105 return ( | 104 return ( |
106 test.startswith(self.HTTP_SUBDIR) or | 105 test.startswith(self.HTTP_SUBDIR) or |
107 self._is_websocket_test(test) or | 106 self._is_websocket_test(test) or |
108 self.VIRTUAL_HTTP_SUBDIR in test | 107 self.VIRTUAL_HTTP_SUBDIR in test |
109 ) | 108 ) |
110 | 109 |
111 def _is_inspector_test(self, test): | 110 def _is_inspector_test(self, test): |
112 return self.INSPECTOR_SUBDIR in test | 111 return self.INSPECTOR_SUBDIR in test |
(...skipping 18 matching lines...)
131 return tests_to_run, tests_to_skip | 130 return tests_to_run, tests_to_skip |
132 | 131 |
133 # Create a sorted list of test files so the subset chunk, | 132 # Create a sorted list of test files so the subset chunk, |
134 # if used, contains alphabetically consecutive tests. | 133 # if used, contains alphabetically consecutive tests. |
135 if self._options.order == 'natural': | 134 if self._options.order == 'natural': |
136 tests_to_run.sort(key=self._port.test_key) | 135 tests_to_run.sort(key=self._port.test_key) |
137 elif self._options.order == 'random': | 136 elif self._options.order == 'random': |
138 random.shuffle(tests_to_run) | 137 random.shuffle(tests_to_run) |
139 elif self._options.order == 'random-seeded': | 138 elif self._options.order == 'random-seeded': |
140 rnd = random.Random() | 139 rnd = random.Random() |
141 rnd.seed(4) # http://xkcd.com/221/ | 140 rnd.seed(4) # http://xkcd.com/221/ |
142 rnd.shuffle(tests_to_run) | 141 rnd.shuffle(tests_to_run) |
143 | 142 |
144 tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run) | 143 tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run) |
145 self._expectations.add_extra_skipped_tests(tests_in_other_chunks) | 144 self._expectations.add_extra_skipped_tests(tests_in_other_chunks) |
146 tests_to_skip.update(tests_in_other_chunks) | 145 tests_to_skip.update(tests_in_other_chunks) |
147 | 146 |
148 return tests_to_run, tests_to_skip | 147 return tests_to_run, tests_to_skip |
149 | 148 |
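[Reviewer note] The fixed seed above is what makes --order=random-seeded reproducible: every run, on every machine in a sharded job, shuffles identically, so split_into_chunks hands out consistent chunks. A minimal sketch of the idea, using a private random.Random instance so the global RNG state is untouched (function and names here are illustrative, not webkitpy's):

    import random

    def seeded_shuffle(tests, seed=4):
        # A dedicated Random instance keeps the shuffle deterministic
        # without disturbing callers of the module-level random functions.
        rnd = random.Random()
        rnd.seed(seed)
        shuffled = list(tests)
        rnd.shuffle(shuffled)
        return shuffled

    # Two independent calls agree, so shard N sees the same order everywhere.
    assert seeded_shuffle(['a', 'b', 'c', 'd']) == seeded_shuffle(['a', 'b', 'c', 'd'])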
150 def _test_input_for_file(self, test_file): | 149 def _test_input_for_file(self, test_file): |
151 return TestInput(test_file, | 150 return TestInput(test_file, |
152 self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms, | 151 self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms, |
153 self._test_requires_lock(test_file), | 152 self._test_requires_lock(test_file), |
154 should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file))) | 153 should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file))) |
155 | 154 |
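[Reviewer note] The conditional above is the entire slow-test timeout policy: a test marked SLOW in the expectations gets the slow_time_out_ms budget instead of the default time_out_ms. Reduced to a sketch (the function, the set-based lookup, and the default millisecond values are illustrative assumptions, not webkitpy's actual defaults):

    def timeout_ms_for(test_file, slow_tests, time_out_ms=6000, slow_time_out_ms=30000):
        # Slow tests get the larger budget; everything else the default.
        return slow_time_out_ms if test_file in slow_tests else time_out_ms

    assert timeout_ms_for('fast/dom/a.html', set()) == 6000
    assert timeout_ms_for('perf/big.html', {'perf/big.html'}) == 30000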
156 def _test_requires_lock(self, test_file): | 155 def _test_requires_lock(self, test_file): |
157 """Return True if the test needs to be locked when running multiple | 156 """Return True if the test needs to be locked when running multiple |
158 instances of this test runner. | 157 instances of this test runner. |
159 | 158 |
160 Perf tests are locked because heavy load caused by running other | 159 Perf tests are locked because heavy load caused by running other |
161 tests in parallel might cause some of them to time out. | 160 tests in parallel might cause some of them to time out. |
162 """ | 161 """ |
163 return self._is_http_test(test_file) or self._is_perf_test(test_file) | 162 return self._is_http_test(test_file) or self._is_perf_test(test_file) |
164 | 163 |
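[Reviewer note] The lock matters because http tests share one server instance and perf tests are load-sensitive, so a scheduler typically splits the run into an exclusive band and a fully parallel band. A sketch of that partition under the predicate above (helper name is illustrative, not webkitpy's):

    def partition_by_lock(tests, requires_lock):
        # Locked tests run serially in one exclusive band; the rest can
        # fan out across workers without contending for shared servers.
        locked = [t for t in tests if requires_lock(t)]
        parallel = [t for t in tests if not requires_lock(t)]
        return locked, parallel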
165 def _test_is_expected_missing(self, test_file): | 164 def _test_is_expected_missing(self, test_file): |
166 expectations = self._expectations.model().get_expectations(test_file) | 165 expectations = self._expectations.model().get_expectations(test_file) |
167 return test_expectations.MISSING in expectations or test_expectations.NEEDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in expectations | 166 return test_expectations.MISSING in expectations or test_expectations.NEEDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in expectations |
168 | 167 |
169 def _test_is_slow(self, test_file): | 168 def _test_is_slow(self, test_file): |
170 return test_expectations.SLOW in self._expectations.model().get_expectations(test_file) | 169 return test_expectations.SLOW in self._expectations.model().get_expectations(test_file) |
171 | 170 |
172 def needs_servers(self, test_names): | 171 def needs_servers(self, test_names): |
173 return any(self._test_requires_lock(test_name) for test_name in test_names) | 172 return any(self._test_requires_lock(test_name) for test_name in test_names) |
174 | 173 |
175 def _rename_results_folder(self): | 174 def _rename_results_folder(self): |
176 try: | 175 try: |
177 timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(self._filesystem.mtime(self._filesystem.join(self._results_directory, "results.html")))) | 176 timestamp = time.strftime( |
| 177 "%Y-%m-%d-%H-%M-%S", time.localtime(self._filesystem.mtime(self._filesystem.join(self._results_directory, "results.html")))) |
178 except (IOError, OSError), e: | 178 except (IOError, OSError), e: |
179 # It might be possible that results.html was not generated in previous run, because the test | 179 # It might be possible that results.html was not generated in previous run, because the test |
180 # run was interrupted even before testing started. In those cases, don't archive the folder. | 180 # run was interrupted even before testing started. In those cases, don't archive the folder. |
181 # Simply override the current folder contents with new results. | 181 # Simply override the current folder contents with new results. |
182 import errno | 182 import errno |
183 if e.errno == errno.EEXIST or e.errno == errno.ENOENT: | 183 if e.errno == errno.EEXIST or e.errno == errno.ENOENT: |
184 self._printer.write_update("No results.html file found in previous run, skipping it.") | 184 self._printer.write_update("No results.html file found in previous run, skipping it.") |
185 return None | 185 return None |
186 archived_name = ''.join((self._filesystem.basename(self._results_directory), "_", timestamp)) | 186 archived_name = ''.join((self._filesystem.basename(self._results_directory), "_", timestamp)) |
187 archived_path = self._filesystem.join(self._filesystem.dirname(self._results_directory), archived_name) | 187 archived_path = self._filesystem.join(self._filesystem.dirname(self._results_directory), archived_name) |
(...skipping 86 matching lines...)
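[Reviewer note] _rename_results_folder derives the archive name from the mtime of the previous run's results.html, so an interrupted run that never wrote the file simply reuses the folder in place. The naming scheme reduces to this sketch (plain float mtime stands in for the filesystem wrapper):

    import time

    def archived_folder_name(results_dirname, results_html_mtime):
        # e.g. "layout-test-results" -> "layout-test-results_2016-01-05-13-42-07"
        timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(results_html_mtime))
        return "%s_%s" % (results_dirname, timestamp)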
274 try: | 274 try: |
275 self._start_servers(tests_to_run) | 275 self._start_servers(tests_to_run) |
276 | 276 |
277 num_workers = self._port.num_workers(int(self._options.child_processes)) | 277 num_workers = self._port.num_workers(int(self._options.child_processes)) |
278 | 278 |
279 initial_results = self._run_tests( | 279 initial_results = self._run_tests( |
280 tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations, | 280 tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations, |
281 num_workers) | 281 num_workers) |
282 | 282 |
283 # Don't retry failures when interrupted by user or failures limit exception. | 283 # Don't retry failures when interrupted by user or failures limit exception. |
284 should_retry_failures = should_retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted) | 284 should_retry_failures = should_retry_failures and not ( |
| 285 initial_results.interrupted or initial_results.keyboard_interrupted) |
285 | 286 |
286 tests_to_retry = self._tests_to_retry(initial_results) | 287 tests_to_retry = self._tests_to_retry(initial_results) |
287 all_retry_results = [] | 288 all_retry_results = [] |
288 if should_retry_failures and tests_to_retry: | 289 if should_retry_failures and tests_to_retry: |
289 enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed() | 290 enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed() |
290 | 291 |
291 for retry_attempt in xrange(1, self._options.num_retries + 1): | 292 for retry_attempt in xrange(1, self._options.num_retries + 1): |
292 if not tests_to_retry: | 293 if not tests_to_retry: |
293 break | 294 break |
294 | 295 |
(...skipping 195 matching lines...)
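[Reviewer note] The retry loop whose body is elided above re-runs only the still-failing tests on each attempt, up to num_retries extra passes, and the guard before it ensures a Ctrl-C or an exceeded-failure-limit stop short-circuits all retries. The control flow, reduced to a self-contained sketch (run_with_retries and the dict-shaped results are stand-ins, not the Manager's real types):

    def run_with_retries(run_once, tests, num_retries):
        # run_once(tests) returns a dict with 'failures' (list) and 'interrupted' (bool).
        initial = run_once(tests)
        all_retry_results = []
        tests_to_retry = list(initial['failures'])
        should_retry = not initial['interrupted']
        for retry_attempt in range(1, num_retries + 1):
            if not (should_retry and tests_to_retry):
                break
            retry_results = run_once(tests_to_retry)
            all_retry_results.append(retry_results)
            tests_to_retry = list(retry_results['failures'])
        return initial, all_retry_results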
490 json_results_generator.write_json(self._filesystem, times_trie, bot_test_times_path) | 491 json_results_generator.write_json(self._filesystem, times_trie, bot_test_times_path) |
491 | 492 |
492 stats_trie = self._stats_trie(initial_results) | 493 stats_trie = self._stats_trie(initial_results) |
493 stats_path = self._filesystem.join(self._results_directory, "stats.json") | 494 stats_path = self._filesystem.join(self._results_directory, "stats.json") |
494 self._filesystem.write_text_file(stats_path, json.dumps(stats_trie)) | 495 self._filesystem.write_text_file(stats_path, json.dumps(stats_trie)) |
495 | 496 |
496 full_results_path = self._filesystem.join(self._results_directory, "full_results.json") | 497 full_results_path = self._filesystem.join(self._results_directory, "full_results.json") |
497 json_results_generator.write_json(self._filesystem, summarized_full_results, full_results_path) | 498 json_results_generator.write_json(self._filesystem, summarized_full_results, full_results_path) |
498 | 499 |
499 full_results_path = self._filesystem.join(self._results_directory, "failing_results.json") | 500 full_results_path = self._filesystem.join(self._results_directory, "failing_results.json") |
500 # We write failing_results.json out as jsonp because we need to load it from a file url for results.html and Chromium doesn't allow that. | 501 # We write failing_results.json out as jsonp because we need to load it |
| 502 # from a file url for results.html and Chromium doesn't allow that. |
501 json_results_generator.write_json(self._filesystem, summarized_failing_results, full_results_path, callback="ADD_RESULTS") | 503 json_results_generator.write_json(self._filesystem, summarized_failing_results, full_results_path, callback="ADD_RESULTS") |
502 | 504 |
503 _log.debug("Finished writing JSON files.") | 505 _log.debug("Finished writing JSON files.") |
504 | 506 |
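[Reviewer note] The jsonp trick mentioned in the comment is just wrapping the serialized dict in a function call, so results.html can pull the data in with a script tag even from a file: URL, where an XHR would be blocked. A sketch of the callback-wrapping write (this standalone write_json is illustrative; it is not the real json_results_generator signature, which takes a filesystem object):

    import json

    def write_json(path, json_object, callback=None):
        # With a callback, emit "ADD_RESULTS({...});" instead of bare JSON.
        text = json.dumps(json_object)
        if callback:
            text = "%s(%s);" % (callback, text)
        with open(path, 'w') as f:
            f.write(text)

The page then loads the file via a script element and defines a global ADD_RESULTS(results) function to receive the object.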
505 def _upload_json_files(self): | 507 def _upload_json_files(self): |
506 if not self._options.test_results_server: | 508 if not self._options.test_results_server: |
507 return | 509 return |
508 | 510 |
509 if not self._options.master_name: | 511 if not self._options.master_name: |
510 _log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.") | 512 _log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.") |
511 return | 513 return |
512 | 514 |
513 _log.debug("Uploading JSON files for builder: %s", self._options.builder_name) | 515 _log.debug("Uploading JSON files for builder: %s", self._options.builder_name) |
514 attrs = [("builder", self._options.builder_name), | 516 attrs = [("builder", self._options.builder_name), |
515 ("testtype", self._options.step_name), | 517 ("testtype", self._options.step_name), |
516 ("master", self._options.master_name)] | 518 ("master", self._options.master_name)] |
517 | 519 |
518 files = [(file, self._filesystem.join(self._results_directory, file)) for file in ["failing_results.json", "full_results.json", "times_ms.json"]] | 520 files = [(file, self._filesystem.join(self._results_directory, file)) |
| 521 for file in ["failing_results.json", "full_results.json", "times_ms.json"]] |
519 | 522 |
520 url = "http://%s/testfile/upload" % self._options.test_results_server | 523 url = "http://%s/testfile/upload" % self._options.test_results_server |
521 # Set uploading timeout in case appengine server is having problems. | 524 # Set uploading timeout in case appengine server is having problems. |
522 # 120 seconds are more than enough to upload test results. | 525 # 120 seconds are more than enough to upload test results. |
523 uploader = FileUploader(url, 120) | 526 uploader = FileUploader(url, 120) |
524 try: | 527 try: |
525 response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs) | 528 response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs) |
526 if response: | 529 if response: |
527 if response.code == 200: | 530 if response.code == 200: |
528 _log.debug("JSON uploaded.") | 531 _log.debug("JSON uploaded.") |
(...skipping 12 matching lines...)
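[Reviewer note] All FileUploader needs here is one POST with a hard timeout; the 120-second cap keeps a wedged App Engine instance from hanging the whole test run. Under the Python 2 stdlib this era of code uses, the shape is roughly the sketch below (the real uploader additionally builds a multipart/form-data body from files and attrs; this helper is an assumption, not its API):

    import urllib2

    def post_with_timeout(url, body, content_type, timeout_seconds=120):
        # urlopen's timeout covers connect and read, so a stuck server
        # fails fast instead of stalling the run indefinitely.
        request = urllib2.Request(url, body, {"Content-Type": content_type})
        return urllib2.urlopen(request, timeout=timeout_seconds)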
541 if self._filesystem.exists(results_file): | 544 if self._filesystem.exists(results_file): |
542 self._filesystem.copyfile(results_file, destination_path) | 545 self._filesystem.copyfile(results_file, destination_path) |
543 | 546 |
544 def _stats_trie(self, initial_results): | 547 def _stats_trie(self, initial_results): |
545 def _worker_number(worker_name): | 548 def _worker_number(worker_name): |
546 return int(worker_name.split('/')[1]) if worker_name else -1 | 549 return int(worker_name.split('/')[1]) if worker_name else -1 |
547 | 550 |
548 stats = {} | 551 stats = {} |
549 for result in initial_results.results_by_name.values(): | 552 for result in initial_results.results_by_name.values(): |
550 if result.type != test_expectations.SKIP: | 553 if result.type != test_expectations.SKIP: |
551 stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))} | 554 stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int( |
| 555 result.test_run_time * 1000), int(result.total_run_time * 1000))} |
552 stats_trie = {} | 556 stats_trie = {} |
553 for name, value in stats.iteritems(): | 557 for name, value in stats.iteritems(): |
554 json_results_generator.add_path_to_trie(name, value, stats_trie) | 558 json_results_generator.add_path_to_trie(name, value, stats_trie) |
555 return stats_trie | 559 return stats_trie |
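[Reviewer note] stats.json re-keys the flat per-test dict into a trie keyed by path segment, which keeps the file compact when thousands of tests share directories. A recursive helper in the spirit of add_path_to_trie (a sketch, not necessarily the exact webkitpy implementation):

    def add_path_to_trie(path, value, trie):
        # "fast/dom/test.html" becomes {'fast': {'dom': {'test.html': value}}}.
        directory, _, rest = path.partition('/')
        if not rest:
            trie[directory] = value
            return
        trie.setdefault(directory, {})
        add_path_to_trie(rest, value, trie[directory])

    trie = {}
    add_path_to_trie('fast/dom/test.html', {'results': (1, 2)}, trie)
    assert trie == {'fast': {'dom': {'test.html': {'results': (1, 2)}}}}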