OLD | NEW |
---|---|
1 # Copyright (C) 2010 Google Inc. All rights reserved. | 1 # Copyright (C) 2010 Google Inc. All rights reserved. |
2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged | 2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged |
3 # | 3 # |
4 # Redistribution and use in source and binary forms, with or without | 4 # Redistribution and use in source and binary forms, with or without |
5 # modification, are permitted provided that the following conditions are | 5 # modification, are permitted provided that the following conditions are |
6 # met: | 6 # met: |
7 # | 7 # |
8 # * Redistributions of source code must retain the above copyright | 8 # * Redistributions of source code must retain the above copyright |
9 # notice, this list of conditions and the following disclaimer. | 9 # notice, this list of conditions and the following disclaimer. |
10 # * Redistributions in binary form must reproduce the above | 10 # * Redistributions in binary form must reproduce the above |
(...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
144 def _test_is_expected_missing(self, test_file): | 144 def _test_is_expected_missing(self, test_file): |
145 expectations = self._expectations.model().get_expectations(test_file) | 145 expectations = self._expectations.model().get_expectations(test_file) |
146 return test_expectations.MISSING in expectations or test_expectations.NE EDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in e xpectations | 146 return test_expectations.MISSING in expectations or test_expectations.NE EDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in e xpectations |
147 | 147 |
148 def _test_is_slow(self, test_file): | 148 def _test_is_slow(self, test_file): |
149 return test_expectations.SLOW in self._expectations.model().get_expectat ions(test_file) | 149 return test_expectations.SLOW in self._expectations.model().get_expectat ions(test_file) |
150 | 150 |
151 def needs_servers(self, test_names): | 151 def needs_servers(self, test_names): |
152 return any(self._test_requires_lock(test_name) for test_name in test_nam es) | 152 return any(self._test_requires_lock(test_name) for test_name in test_nam es) |
153 | 153 |
154 def _rename_results_folder(self): | |
155 try: | |
156 timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime(self._fil esystem.mtime(self._filesystem.join(self._results_directory, "results.html")))) | |
157 except OSError, e: | |
158 # It might be possible that results.html was not generated in previo us run, because the test | |
159 # run was interrupted even before testing started. In those cases, d on't archive the folder. | |
160 # Simply override the current folder contents with new results. | |
161 import errno | |
162 if e.errno == errno.EEXIST: | |
163 _log.warning("No results.html file found in previous run, skippi ng it.") | |
164 return None | |
165 archived_name = ''.join((self._filesystem.basename(self._results_directo ry), "_", timestamp)) | |
166 archived_path = self._filesystem.join(self._filesystem.dirname(self._res ults_directory), archived_name) | |
167 self._filesystem.move(self._results_directory, archived_path) | |
168 return archived_path | |
Dirk Pranke
2014/07/16 19:32:44
Do you need to return the archived_path? You're no
patro
2014/07/18 06:53:25
Done.
| |
169 | |
    def _set_up_run(self, test_names):
        """Prepare the port and environment for a test run.

        Performs, in order: an optional build check, starting the pixel-test
        helper, a system-dependency check, archiving or clobbering old
        results, creating the results directory, and port setup.

        Returns test_run_results.OK_EXIT_STATUS on success, or a non-zero
        exit code from the build or dependency checks on failure.
        """
        self._printer.write_update("Checking build ...")
        if self._options.build:
            exit_code = self._port.check_build(self.needs_servers(test_names), self._printer)
            if exit_code:
                _log.error("Build check failed")
                return exit_code

        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        if self._options.pixel_tests:
            self._printer.write_update("Starting pixel test helper ...")
            self._port.start_helper()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            exit_code = self._port.check_sys_deps(self.needs_servers(test_names))
            if exit_code:
                # The helper may already be running (started above); shut it
                # down before bailing out so we don't leak the process.
                self._port.stop_helper()
                return exit_code

        # FIXME : Add a condition here to limit the number of archived results (total_archived_results < MAX_ARCHIVE_RESULTS)
        # Versioned results take precedence over clobbering: archive the old
        # folder instead of deleting it, and warn if both flags were given.
        if self._options.enable_versioned_results and self._filesystem.exists(self._results_directory):
            if self._options.clobber_old_results:
                _log.warning("Flag --enable_versioned_results overrides --clobber-old-results.")
            # Rename the existing results folder for archiving.
            self._rename_results_folder()
        elif self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        self._port.setup_test_run()
        return test_run_results.OK_EXIT_STATUS
184 | 206 |
185 def run(self, args): | 207 def run(self, args): |
186 """Run the tests and return a RunDetails object with the results.""" | 208 """Run the tests and return a RunDetails object with the results.""" |
(...skipping 250 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
437 return int(worker_name.split('/')[1]) if worker_name else -1 | 459 return int(worker_name.split('/')[1]) if worker_name else -1 |
438 | 460 |
439 stats = {} | 461 stats = {} |
440 for result in initial_results.results_by_name.values(): | 462 for result in initial_results.results_by_name.values(): |
441 if result.type != test_expectations.SKIP: | 463 if result.type != test_expectations.SKIP: |
442 stats[result.test_name] = {'results': (_worker_number(result.wor ker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int (result.total_run_time * 1000))} | 464 stats[result.test_name] = {'results': (_worker_number(result.wor ker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int (result.total_run_time * 1000))} |
443 stats_trie = {} | 465 stats_trie = {} |
444 for name, value in stats.iteritems(): | 466 for name, value in stats.iteritems(): |
445 json_results_generator.add_path_to_trie(name, value, stats_trie) | 467 json_results_generator.add_path_to_trie(name, value, stats_trie) |
446 return stats_trie | 468 return stats_trie |
OLD | NEW |