OLD | NEW |
---|---|
1 # Copyright (C) 2010 Google Inc. All rights reserved. | 1 # Copyright (C) 2010 Google Inc. All rights reserved. |
2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Sze ged | 2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Sze ged |
3 # | 3 # |
4 # Redistribution and use in source and binary forms, with or without | 4 # Redistribution and use in source and binary forms, with or without |
5 # modification, are permitted provided that the following conditions are | 5 # modification, are permitted provided that the following conditions are |
6 # met: | 6 # met: |
7 # | 7 # |
8 # * Redistributions of source code must retain the above copyright | 8 # * Redistributions of source code must retain the above copyright |
9 # notice, this list of conditions and the following disclaimer. | 9 # notice, this list of conditions and the following disclaimer. |
10 # * Redistributions in binary form must reproduce the above | 10 # * Redistributions in binary form must reproduce the above |
(...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
144 def _test_is_expected_missing(self, test_file): | 144 def _test_is_expected_missing(self, test_file): |
145 expectations = self._expectations.model().get_expectations(test_file) | 145 expectations = self._expectations.model().get_expectations(test_file) |
146 return test_expectations.MISSING in expectations or test_expectations.NE EDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in e xpectations | 146 return test_expectations.MISSING in expectations or test_expectations.NE EDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in e xpectations |
147 | 147 |
148 def _test_is_slow(self, test_file): | 148 def _test_is_slow(self, test_file): |
149 return test_expectations.SLOW in self._expectations.model().get_expectat ions(test_file) | 149 return test_expectations.SLOW in self._expectations.model().get_expectat ions(test_file) |
150 | 150 |
151 def needs_servers(self, test_names): | 151 def needs_servers(self, test_names): |
152 return any(self._test_requires_lock(test_name) for test_name in test_nam es) | 152 return any(self._test_requires_lock(test_name) for test_name in test_nam es) |
153 | 153 |
154 def _rename_results_folder(self): | |
155 try: | |
156 timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime(self._fil esystem.mtime(self._filesystem.join(self._results_directory, "results.html")))) | |
Dirk Pranke
2014/07/29 18:53:06
Nit: can this be time.localtime() instead of time.
patro
2014/07/30 06:37:45
Done.
| |
157 except OSError, e: | |
158 # It might be possible that results.html was not generated in previo us run, because the test | |
159 # run was interrupted even before testing started. In those cases, d on't archive the folder. | |
160 # Simply override the current folder contents with new results. | |
161 import errno | |
162 if e.errno == errno.EEXIST: | |
163 _log.warning("No results.html file found in previous run, skippi ng it.") | |
164 return None | |
165 archived_name = ''.join((self._filesystem.basename(self._results_directo ry), "_", timestamp)) | |
166 archived_path = self._filesystem.join(self._filesystem.dirname(self._res ults_directory), archived_name) | |
167 self._filesystem.move(self._results_directory, archived_path) | |
168 | |
    def _set_up_run(self, test_names):
        """Prepare the port/environment for a test run.

        Checks the build, starts the pixel-test helper, verifies system
        dependencies, and archives or clobbers the previous results directory
        before creating a fresh one.

        Args:
            test_names: the list of tests to be run; used only to decide
                whether servers (and hence a server-capable build) are needed.

        Returns:
            test_run_results.OK_EXIT_STATUS on success, or a non-zero exit
            code from the build/dependency checks on failure.
        """
        self._printer.write_update("Checking build ...")
        if self._options.build:
            exit_code = self._port.check_build(self.needs_servers(test_names), self._printer)
            if exit_code:
                _log.error("Build check failed")
                return exit_code

        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        if self._options.pixel_tests:
            self._printer.write_update("Starting pixel test helper ...")
            self._port.start_helper()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            exit_code = self._port.check_sys_deps(self.needs_servers(test_names))
            if exit_code:
                # The helper was started above; shut it down before bailing out.
                self._port.stop_helper()
                return exit_code

        # FIXME : Add a condition here to limit the number of archived results (total_archived_results < MAX_ARCHIVE_RESULTS)
        # Versioned results take precedence over clobbering: the old results
        # directory is renamed (archived) instead of deleted.
        if self._options.enable_versioned_results and self._filesystem.exists(self._results_directory):
            if self._options.clobber_old_results:
                _log.warning("Flag --enable_versioned_results overrides --clobber-old-results.")
            # Rename the existing results folder for archiving.
            self._rename_results_folder()
        elif self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        self._port.setup_test_run()
        return test_run_results.OK_EXIT_STATUS
184 | 205 |
185 def run(self, args): | 206 def run(self, args): |
186 """Run the tests and return a RunDetails object with the results.""" | 207 """Run the tests and return a RunDetails object with the results.""" |
(...skipping 250 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
437 return int(worker_name.split('/')[1]) if worker_name else -1 | 458 return int(worker_name.split('/')[1]) if worker_name else -1 |
438 | 459 |
439 stats = {} | 460 stats = {} |
440 for result in initial_results.results_by_name.values(): | 461 for result in initial_results.results_by_name.values(): |
441 if result.type != test_expectations.SKIP: | 462 if result.type != test_expectations.SKIP: |
442 stats[result.test_name] = {'results': (_worker_number(result.wor ker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int (result.total_run_time * 1000))} | 463 stats[result.test_name] = {'results': (_worker_number(result.wor ker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int (result.total_run_time * 1000))} |
443 stats_trie = {} | 464 stats_trie = {} |
444 for name, value in stats.iteritems(): | 465 for name, value in stats.iteritems(): |
445 json_results_generator.add_path_to_trie(name, value, stats_trie) | 466 json_results_generator.add_path_to_trie(name, value, stats_trie) |
446 return stats_trie | 467 return stats_trie |
OLD | NEW |