| OLD | NEW |
| 1 # Copyright (C) 2010, 2012 Google Inc. All rights reserved. | 1 # Copyright (C) 2010, 2012 Google Inc. All rights reserved. |
| 2 # | 2 # |
| 3 # Redistribution and use in source and binary forms, with or without | 3 # Redistribution and use in source and binary forms, with or without |
| 4 # modification, are permitted provided that the following conditions are | 4 # modification, are permitted provided that the following conditions are |
| 5 # met: | 5 # met: |
| 6 # | 6 # |
| 7 # * Redistributions of source code must retain the above copyright | 7 # * Redistributions of source code must retain the above copyright |
| 8 # notice, this list of conditions and the following disclaimer. | 8 # notice, this list of conditions and the following disclaimer. |
| 9 # * Redistributions in binary form must reproduce the above | 9 # * Redistributions in binary form must reproduce the above |
| 10 # copyright notice, this list of conditions and the following disclaimer | 10 # copyright notice, this list of conditions and the following disclaimer |
| (...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 88 fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path
()] | 88 fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path
()] |
| 89 self._print_default("Baseline search path: %s -> generic" % " -> ".join(
fallback_path)) | 89 self._print_default("Baseline search path: %s -> generic" % " -> ".join(
fallback_path)) |
| 90 | 90 |
| 91 self._print_default("Using %s build" % self._options.configuration) | 91 self._print_default("Using %s build" % self._options.configuration) |
| 92 if self._options.pixel_tests: | 92 if self._options.pixel_tests: |
| 93 self._print_default("Pixel tests enabled") | 93 self._print_default("Pixel tests enabled") |
| 94 else: | 94 else: |
| 95 self._print_default("Pixel tests disabled") | 95 self._print_default("Pixel tests disabled") |
| 96 | 96 |
| 97 self._print_default("Regular timeout: %s, slow test timeout: %s" % | 97 self._print_default("Regular timeout: %s, slow test timeout: %s" % |
| 98 (self._options.time_out_ms, self._options.slow_time_out_ms)) | 98 (self._options.time_out_ms, self._options.slow_time_
out_ms)) |
| 99 | 99 |
| 100 self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_li
ne())) | 100 self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_li
ne())) |
| 101 self._print_default('') | 101 self._print_default('') |
| 102 | 102 |
| 103 def print_found(self, num_all_test_files, num_to_run, repeat_each, iteration
s): | 103 def print_found(self, num_all_test_files, num_to_run, repeat_each, iteration
s): |
| 104 found_str = 'Found %s; running %d' % (grammar.pluralize('test', num_all_
test_files), num_to_run) | 104 found_str = 'Found %s; running %d' % (grammar.pluralize('test', num_all_
test_files), num_to_run) |
| 105 if repeat_each * iterations > 1: | 105 if repeat_each * iterations > 1: |
| 106 found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' %
(repeat_each * iterations, repeat_each, iterations) | 106 found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' %
(repeat_each * iterations, repeat_each, iterations) |
| 107 found_str += ', skipping %d' % (num_all_test_files - num_to_run) | 107 found_str += ', skipping %d' % (num_all_test_files - num_to_run) |
| 108 self._print_default(found_str + '.') | 108 self._print_default(found_str + '.') |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 158 self._print_debug("Thread timing:") | 158 self._print_debug("Thread timing:") |
| 159 stats = {} | 159 stats = {} |
| 160 cuml_time = 0 | 160 cuml_time = 0 |
| 161 for result in run_results.results_by_name.values(): | 161 for result in run_results.results_by_name.values(): |
| 162 stats.setdefault(result.worker_name, {'num_tests': 0, 'total_time':
0}) | 162 stats.setdefault(result.worker_name, {'num_tests': 0, 'total_time':
0}) |
| 163 stats[result.worker_name]['num_tests'] += 1 | 163 stats[result.worker_name]['num_tests'] += 1 |
| 164 stats[result.worker_name]['total_time'] += result.total_run_time | 164 stats[result.worker_name]['total_time'] += result.total_run_time |
| 165 cuml_time += result.total_run_time | 165 cuml_time += result.total_run_time |
| 166 | 166 |
| 167 for worker_name in stats: | 167 for worker_name in stats: |
| 168 self._print_debug(" %10s: %5d tests, %6.2f secs" % (worker_name,
stats[worker_name]['num_tests'], stats[worker_name]['total_time'])) | 168 self._print_debug(" %10s: %5d tests, %6.2f secs" % (worker_name,
stats[ |
| 169 worker_name]['num_tests'], stats[worker_name]['tot
al_time'])) |
| 169 self._print_debug(" %6.2f cumulative, %6.2f optimal" % (cuml_time, cum
l_time / num_workers)) | 170 self._print_debug(" %6.2f cumulative, %6.2f optimal" % (cuml_time, cum
l_time / num_workers)) |
| 170 self._print_debug("") | 171 self._print_debug("") |
| 171 | 172 |
| 172 def _print_aggregate_test_statistics(self, run_results): | 173 def _print_aggregate_test_statistics(self, run_results): |
| 173 times_for_dump_render_tree = [result.test_run_time for result in run_res
ults.results_by_name.values()] | 174 times_for_dump_render_tree = [result.test_run_time for result in run_res
ults.results_by_name.values()] |
| 174 self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (sec
onds):", times_for_dump_render_tree) | 175 self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (sec
onds):", times_for_dump_render_tree) |
| 175 | 176 |
    def _print_individual_test_times(self, run_results):
        """Log per-test timing breakdowns at debug verbosity.

        Buckets every completed test into up to three (non-exclusive) lists —
        tests marked SLOW in the expectations, tests that timed out or crashed,
        and the slowest "unexpected" tests (neither SLOW nor timeout/crash,
        capped at NUM_SLOW_TESTS_TO_LOG) — and prints each non-empty bucket.
        """
        # Reverse-sort by the time spent in the driver.

        individual_test_timings = sorted(run_results.results_by_name.values(),
                                         key=lambda result: result.test_run_time, reverse=True)
        num_printed = 0
        slow_tests = []
        timeout_or_crash_tests = []
        unexpected_slow_tests = []
        for test_tuple in individual_test_timings:
            test_name = test_tuple.test_name
            is_timeout_crash_or_slow = False
            if test_name in run_results.slow_tests:
                is_timeout_crash_or_slow = True
                slow_tests.append(test_tuple)

            if test_name in run_results.failures_by_name:
                result = run_results.results_by_name[test_name].type
                if (result == test_expectations.TIMEOUT or
                        result == test_expectations.CRASH):
                    is_timeout_crash_or_slow = True
                    timeout_or_crash_tests.append(test_tuple)

            # Because the input is sorted slowest-first, the first
            # NUM_SLOW_TESTS_TO_LOG non-SLOW/non-failing entries are exactly
            # the slowest unexpected tests.
            if (not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG):
                num_printed = num_printed + 1
                unexpected_slow_tests.append(test_tuple)

        self._print_debug("")
        if unexpected_slow_tests:
            self._print_test_list_timing("%s slowest tests that are not marked as SLOW and did not timeout/crash:" %
                                         NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
            self._print_debug("")

        if slow_tests:
            self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
            self._print_debug("")

        if timeout_or_crash_tests:
            self._print_test_list_timing("Tests that timed out or crashed:", timeout_or_crash_tests)
            self._print_debug("")
| 215 | 217 |
| (...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 295 incomplete_str = '' | 297 incomplete_str = '' |
| 296 if incomplete: | 298 if incomplete: |
| 297 self._print_default("") | 299 self._print_default("") |
| 298 incomplete_str = " (%d didn't run)" % incomplete | 300 incomplete_str = " (%d didn't run)" % incomplete |
| 299 | 301 |
| 300 if self._options.verbose or self._options.debug_rwt_logging or unexpecte
d: | 302 if self._options.verbose or self._options.debug_rwt_logging or unexpecte
d: |
| 301 self.writeln("") | 303 self.writeln("") |
| 302 | 304 |
| 303 expected_summary_str = '' | 305 expected_summary_str = '' |
| 304 if run_results.expected_failures > 0: | 306 if run_results.expected_failures > 0: |
| 305 expected_summary_str = " (%d passed, %d didn't)" % (expected - run_r
esults.expected_failures, run_results.expected_failures) | 307 expected_summary_str = " (%d passed, %d didn't)" % ( |
| 308 expected - run_results.expected_failures, run_results.expected_f
ailures) |
| 306 | 309 |
| 307 summary = '' | 310 summary = '' |
| 308 if unexpected == 0: | 311 if unexpected == 0: |
| 309 if expected == total: | 312 if expected == total: |
| 310 if expected > 1: | 313 if expected > 1: |
| 311 summary = "All %d tests ran as expected%s%s." % (expected, e
xpected_summary_str, timing_summary) | 314 summary = "All %d tests ran as expected%s%s." % (expected, e
xpected_summary_str, timing_summary) |
| 312 else: | 315 else: |
| 313 summary = "The test ran as expected%s%s." % (expected_summar
y_str, timing_summary) | 316 summary = "The test ran as expected%s%s." % (expected_summar
y_str, timing_summary) |
| 314 else: | 317 else: |
| 315 summary = "%s ran as expected%s%s%s." % (grammar.pluralize('test
', expected), expected_summary_str, incomplete_str, timing_summary) | 318 summary = "%s ran as expected%s%s%s." % (grammar.pluralize( |
| 319 'test', expected), expected_summary_str, incomplete_str, tim
ing_summary) |
| 316 else: | 320 else: |
| 317 summary = "%s ran as expected%s, %d didn't%s%s:" % (grammar.pluraliz
e('test', expected), expected_summary_str, unexpected, incomplete_str, timing_su
mmary) | 321 summary = "%s ran as expected%s, %d didn't%s%s:" % (grammar.pluraliz
e( |
| 322 'test', expected), expected_summary_str, unexpected, incomplete_
str, timing_summary) |
| 318 | 323 |
| 319 self._print_quiet(summary) | 324 self._print_quiet(summary) |
| 320 self._print_quiet("") | 325 self._print_quiet("") |
| 321 | 326 |
| 322 def _test_status_line(self, test_name, suffix): | 327 def _test_status_line(self, test_name, suffix): |
| 323 format_string = '[%d/%d] %s%s' | 328 format_string = '[%d/%d] %s%s' |
| 324 status_line = format_string % (self.num_completed, self.num_tests, test_
name, suffix) | 329 status_line = format_string % (self.num_completed, self.num_tests, test_
name, suffix) |
| 325 if len(status_line) > self._meter.number_of_columns(): | 330 if len(status_line) > self._meter.number_of_columns(): |
| 326 overflow_columns = len(status_line) - self._meter.number_of_columns(
) | 331 overflow_columns = len(status_line) - self._meter.number_of_columns(
) |
| 327 ellipsis = '...' | 332 ellipsis = '...' |
| (...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 390 self._print_default(' base: %s' % base) | 395 self._print_default(' base: %s' % base) |
| 391 self._print_default(' args: %s' % args) | 396 self._print_default(' args: %s' % args) |
| 392 self._print_default(' reference_args: %s' % reference_args) | 397 self._print_default(' reference_args: %s' % reference_args) |
| 393 | 398 |
| 394 references = self._port.reference_files(test_name) | 399 references = self._port.reference_files(test_name) |
| 395 if references: | 400 if references: |
| 396 for _, filename in references: | 401 for _, filename in references: |
| 397 self._print_default(' ref: %s' % self._port.relative_test_filen
ame(filename)) | 402 self._print_default(' ref: %s' % self._port.relative_test_filen
ame(filename)) |
| 398 else: | 403 else: |
| 399 for extension in ('.txt', '.png', '.wav'): | 404 for extension in ('.txt', '.png', '.wav'): |
| 400 self._print_baseline(test_name, extension) | 405 self._print_baseline(test_name, extension) |
| 401 | 406 |
| 402 self._print_default(' exp: %s' % exp_str) | 407 self._print_default(' exp: %s' % exp_str) |
| 403 self._print_default(' got: %s' % got_str) | 408 self._print_default(' got: %s' % got_str) |
| 404 self._print_default(' took: %-.3f' % result.test_run_time) | 409 self._print_default(' took: %-.3f' % result.test_run_time) |
| 405 self._print_default('') | 410 self._print_default('') |
| 406 | 411 |
| 407 def _print_baseline(self, test_name, extension): | 412 def _print_baseline(self, test_name, extension): |
| 408 baseline = self._port.expected_filename(test_name, extension) | 413 baseline = self._port.expected_filename(test_name, extension) |
| 409 if self._port._filesystem.exists(baseline): | 414 if self._port._filesystem.exists(baseline): |
| 410 relpath = self._port.relative_test_filename(baseline) | 415 relpath = self._port.relative_test_filename(baseline) |
| (...skipping 16 matching lines...) Expand all Loading... |
| 427 self._meter.write_throttled_update(msg) | 432 self._meter.write_throttled_update(msg) |
| 428 | 433 |
    def write_update(self, msg):
        """Immediately replace the meter's transient status line with msg."""
        self._meter.write_update(msg)
| 431 | 436 |
    def writeln(self, msg):
        """Write msg as a permanent (non-overwritten) line via the meter."""
        self._meter.writeln(msg)
| 434 | 439 |
    def flush(self):
        """Flush any buffered meter output to the underlying stream."""
        self._meter.flush()
| OLD | NEW |