| OLD | NEW |
| 1 # Copyright (C) 2010, 2012 Google Inc. All rights reserved. | 1 # Copyright (C) 2010, 2012 Google Inc. All rights reserved. |
| 2 # | 2 # |
| 3 # Redistribution and use in source and binary forms, with or without | 3 # Redistribution and use in source and binary forms, with or without |
| 4 # modification, are permitted provided that the following conditions are | 4 # modification, are permitted provided that the following conditions are |
| 5 # met: | 5 # met: |
| 6 # | 6 # |
| 7 # * Redistributions of source code must retain the above copyright | 7 # * Redistributions of source code must retain the above copyright |
| 8 # notice, this list of conditions and the following disclaimer. | 8 # notice, this list of conditions and the following disclaimer. |
| 9 # * Redistributions in binary form must reproduce the above | 9 # * Redistributions in binary form must reproduce the above |
| 10 # copyright notice, this list of conditions and the following disclaimer | 10 # copyright notice, this list of conditions and the following disclaimer |
| (...skipping 38 matching lines...) |
| 49 optparse.make_option('-v', '--verbose', action='store_true', default=False, | 49 optparse.make_option('-v', '--verbose', action='store_true', default=False, |
| 50 help='print a summarized result for every test (one line per test)'), | 50 help='print a summarized result for every test (one line per test)'), |
| 51 optparse.make_option('--details', action='store_true', default=False, | 51 optparse.make_option('--details', action='store_true', default=False, |
| 52 help='print detailed results for every test'), | 52 help='print detailed results for every test'), |
| 53 optparse.make_option('--debug-rwt-logging', action='store_true', default=False, | 53 optparse.make_option('--debug-rwt-logging', action='store_true', default=False, |
| 54 help='print timestamps and debug information for run-webkit-tests itself'), | 54 help='print timestamps and debug information for run-webkit-tests itself'), |
| 55 ] | 55 ] |
| 56 | 56 |
| 57 | 57 |
| 58 class Printer(object): | 58 class Printer(object): |
| | 59 |
| 59 """Class handling all non-debug-logging printing done by run-webkit-tests.""" | 60 """Class handling all non-debug-logging printing done by run-webkit-tests.""" |
| 60 | 61 |
| 61 def __init__(self, port, options, regular_output, logger=None): | 62 def __init__(self, port, options, regular_output, logger=None): |
| 62 self.num_completed = 0 | 63 self.num_completed = 0 |
| 63 self.num_tests = 0 | 64 self.num_tests = 0 |
| 64 self._port = port | 65 self._port = port |
| 65 self._options = options | 66 self._options = options |
| 66 self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger, | 67 self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger, |
| 67 number_of_columns=self._port.host.platform.terminal_width()) | 68 number_of_columns=self._port.host.platform.terminal_width()) |
| 68 self._running_tests = [] | 69 self._running_tests = [] |
| 69 self._completed_tests = [] | 70 self._completed_tests = [] |
| 70 | 71 |
| 71 def cleanup(self): | 72 def cleanup(self): |
| 72 self._meter.cleanup() | 73 self._meter.cleanup() |
| 73 | 74 |
| 74 def __del__(self): | 75 def __del__(self): |
| 75 self.cleanup() | 76 self.cleanup() |
| 76 | 77 |
| 77 def print_config(self, results_directory): | 78 def print_config(self, results_directory): |
| 78 self._print_default("Using port '%s'" % self._port.name()) | 79 self._print_default("Using port '%s'" % self._port.name()) |
| 79 self._print_default("Test configuration: %s" % self._port.test_configuration()) | 80 self._print_default('Test configuration: %s' % self._port.test_configuration()) |
| 80 self._print_default("View the test results at file://%s/results.html" % results_directory) | 81 self._print_default('View the test results at file://%s/results.html' % results_directory) |
| 81 | 82 |
| 82 if self._options.enable_versioned_results: | 83 if self._options.enable_versioned_results: |
| 83 self._print_default("View the archived results dashboard at file://%s/dashboard.html" % results_directory) | 84 self._print_default('View the archived results dashboard at file://%s/dashboard.html' % results_directory) |
| 84 | 85 |
| 85 # FIXME: should these options be in printing_options? | 86 # FIXME: should these options be in printing_options? |
| 86 if self._options.new_baseline: | 87 if self._options.new_baseline: |
| 87 self._print_default("Placing new baselines in %s" % self._port.baseline_path()) | 88 self._print_default('Placing new baselines in %s' % self._port.baseline_path()) |
| 88 | 89 |
| 89 fs = self._port.host.filesystem | 90 fs = self._port.host.filesystem |
| 90 fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()] | 91 fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()] |
| 91 self._print_default("Baseline search path: %s -> generic" % " -> ".join(fallback_path)) | 92 self._print_default('Baseline search path: %s -> generic' % ' -> '.join(fallback_path)) |
| 92 | 93 |
| 93 self._print_default("Using %s build" % self._options.configuration) | 94 self._print_default('Using %s build' % self._options.configuration) |
| 94 if self._options.pixel_tests: | 95 if self._options.pixel_tests: |
| 95 self._print_default("Pixel tests enabled") | 96 self._print_default('Pixel tests enabled') |
| 96 else: | 97 else: |
| 97 self._print_default("Pixel tests disabled") | 98 self._print_default('Pixel tests disabled') |
| 98 | 99 |
| 99 self._print_default("Regular timeout: %s, slow test timeout: %s" % | 100 self._print_default('Regular timeout: %s, slow test timeout: %s' % |
| 100 (self._options.time_out_ms, self._options.slow_time_out_ms)) | 101 (self._options.time_out_ms, self._options.slow_time_out_ms)) |
| 101 | 102 |
| 102 self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_line())) | 103 self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_line())) |
| 103 self._print_default('') | 104 self._print_default('') |
| 104 | 105 |
| 105 def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations): | 106 def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations): |
| 106 found_str = 'Found %s; running %d' % (grammar.pluralize('test', num_all_test_files), num_to_run) | 107 found_str = 'Found %s; running %d' % (grammar.pluralize('test', num_all_test_files), num_to_run) |
| 107 if repeat_each * iterations > 1: | 108 if repeat_each * iterations > 1: |
| 108 found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (repeat_each * iterations, repeat_each, iterations) | 109 found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (repeat_each * iterations, repeat_each, iterations) |
| 109 found_str += ', skipping %d' % (num_all_test_files - num_to_run) | 110 found_str += ', skipping %d' % (num_all_test_files - num_to_run) |
| 110 self._print_default(found_str + '.') | 111 self._print_default(found_str + '.') |
| 111 | 112 |
| 112 def print_expected(self, run_results, tests_with_result_type_callback): | 113 def print_expected(self, run_results, tests_with_result_type_callback): |
| 113 self._print_expected_results_of_type(run_results, test_expectations.PASS, "passes", tests_with_result_type_callback) | 114 self._print_expected_results_of_type(run_results, test_expectations.PASS, 'passes', tests_with_result_type_callback) |
| 114 self._print_expected_results_of_type(run_results, test_expectations.FAIL, "failures", tests_with_result_type_callback) | 115 self._print_expected_results_of_type(run_results, test_expectations.FAIL, 'failures', tests_with_result_type_callback) |
| 115 self._print_expected_results_of_type(run_results, test_expectations.FLAKY, "flaky", tests_with_result_type_callback) | 116 self._print_expected_results_of_type(run_results, test_expectations.FLAKY, 'flaky', tests_with_result_type_callback) |
| 116 self._print_debug('') | 117 self._print_debug('') |
| 117 | 118 |
| 118 def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards): | 119 def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards): |
| 119 driver_name = self._port.driver_name() | 120 driver_name = self._port.driver_name() |
| 120 if num_workers == 1: | 121 if num_workers == 1: |
| 121 self._print_default("Running 1 %s." % driver_name) | 122 self._print_default('Running 1 %s.' % driver_name) |
| 122 self._print_debug("(%s)." % grammar.pluralize('shard', num_shards)) | 123 self._print_debug('(%s).' % grammar.pluralize('shard', num_shards)) |
| 123 else: | 124 else: |
| 124 self._print_default("Running %d %ss in parallel." % (num_workers, driver_name)) | 125 self._print_default('Running %d %ss in parallel.' % (num_workers, driver_name)) |
| 125 self._print_debug("(%d shards; %d locked)." % (num_shards, num_locked_shards)) | 126 self._print_debug('(%d shards; %d locked).' % (num_shards, num_locked_shards)) |
| 126 self._print_default('') | 127 self._print_default('') |
| 127 | 128 |
| 128 def _print_expected_results_of_type(self, run_results, result_type, result_type_str, tests_with_result_type_callback): | 129 def _print_expected_results_of_type(self, run_results, result_type, result_type_str, tests_with_result_type_callback): |
| 129 tests = tests_with_result_type_callback(result_type) | 130 tests = tests_with_result_type_callback(result_type) |
| 130 now = run_results.tests_by_timeline[test_expectations.NOW] | 131 now = run_results.tests_by_timeline[test_expectations.NOW] |
| 131 wontfix = run_results.tests_by_timeline[test_expectations.WONTFIX] | 132 wontfix = run_results.tests_by_timeline[test_expectations.WONTFIX] |
| 132 | 133 |
| 133 # We use a fancy format string in order to print the data out in a | 134 # We use a fancy format string in order to print the data out in a |
| 134 # nicely-aligned table. | 135 # nicely-aligned table. |
| 135 fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)" | 136 fmtstr = ('Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)' |
| 136 % (self._num_digits(now), self._num_digits(wontfix))) | 137 % (self._num_digits(now), self._num_digits(wontfix))) |
| 137 self._print_debug(fmtstr % (len(tests), result_type_str, len(tests & now), len(tests & wontfix))) | 138 self._print_debug(fmtstr % (len(tests), result_type_str, len(tests & now), len(tests & wontfix))) |
| 138 | 139 |
| 139 def _num_digits(self, num): | 140 def _num_digits(self, num): |
| 140 ndigits = 1 | 141 ndigits = 1 |
| 141 if len(num): | 142 if len(num): |
| 142 ndigits = int(math.log10(len(num))) + 1 | 143 ndigits = int(math.log10(len(num))) + 1 |
| 143 return ndigits | 144 return ndigits |
| 144 | 145 |
| 145 def print_results(self, run_time, run_results, summarized_results): | 146 def print_results(self, run_time, run_results, summarized_results): |
| 146 self._print_timing_statistics(run_time, run_results) | 147 self._print_timing_statistics(run_time, run_results) |
| 147 self._print_one_line_summary(run_time, run_results) | 148 self._print_one_line_summary(run_time, run_results) |
| 148 | 149 |
| 149 def _print_timing_statistics(self, total_time, run_results): | 150 def _print_timing_statistics(self, total_time, run_results): |
| 150 self._print_debug("Test timing:") | 151 self._print_debug('Test timing:') |
| 151 self._print_debug(" %6.2f total testing time" % total_time) | 152 self._print_debug(' %6.2f total testing time' % total_time) |
| 152 self._print_debug("") | 153 self._print_debug('') |
| 153 | 154 |
| 154 self._print_worker_statistics(run_results, int(self._options.child_processes)) | 155 self._print_worker_statistics(run_results, int(self._options.child_processes)) |
| 155 self._print_aggregate_test_statistics(run_results) | 156 self._print_aggregate_test_statistics(run_results) |
| 156 self._print_individual_test_times(run_results) | 157 self._print_individual_test_times(run_results) |
| 157 self._print_directory_timings(run_results) | 158 self._print_directory_timings(run_results) |
| 158 | 159 |
| 159 def _print_worker_statistics(self, run_results, num_workers): | 160 def _print_worker_statistics(self, run_results, num_workers): |
| 160 self._print_debug("Thread timing:") | 161 self._print_debug('Thread timing:') |
| 161 stats = {} | 162 stats = {} |
| 162 cuml_time = 0 | 163 cuml_time = 0 |
| 163 for result in run_results.results_by_name.values(): | 164 for result in run_results.results_by_name.values(): |
| 164 stats.setdefault(result.worker_name, {'num_tests': 0, 'total_time': 0}) | 165 stats.setdefault(result.worker_name, {'num_tests': 0, 'total_time': 0}) |
| 165 stats[result.worker_name]['num_tests'] += 1 | 166 stats[result.worker_name]['num_tests'] += 1 |
| 166 stats[result.worker_name]['total_time'] += result.total_run_time | 167 stats[result.worker_name]['total_time'] += result.total_run_time |
| 167 cuml_time += result.total_run_time | 168 cuml_time += result.total_run_time |
| 168 | 169 |
| 169 for worker_name in stats: | 170 for worker_name in stats: |
| 170 self._print_debug(" %10s: %5d tests, %6.2f secs" % (worker_name, stats[worker_name]['num_tests'], stats[worker_name]['total_time'])) | 171 self._print_debug( |
| 171 self._print_debug(" %6.2f cumulative, %6.2f optimal" % (cuml_time, cuml_time / num_workers)) | 172 ' %10s: %5d tests, %6.2f secs' % |
| 172 self._print_debug("") | 173 (worker_name, |
| | 174 stats[worker_name]['num_tests'], |
| | 175 stats[worker_name]['total_time'])) |
| | 176 self._print_debug(' %6.2f cumulative, %6.2f optimal' % (cuml_time, cuml_time / num_workers)) |
| | 177 self._print_debug('') |
| 173 | 178 |
| 174 def _print_aggregate_test_statistics(self, run_results): | 179 def _print_aggregate_test_statistics(self, run_results): |
| 175 times_for_dump_render_tree = [result.test_run_time for result in run_results.results_by_name.values()] | 180 times_for_dump_render_tree = [result.test_run_time for result in run_results.results_by_name.values()] |
| 176 self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", times_for_dump_render_tree) | 181 self._print_statistics_for_test_timings('PER TEST TIME IN TESTSHELL (seconds):', times_for_dump_render_tree) |
| 177 | 182 |
| 178 def _print_individual_test_times(self, run_results): | 183 def _print_individual_test_times(self, run_results): |
| 179 # Reverse-sort by the time spent in the driver. | 184 # Reverse-sort by the time spent in the driver. |
| 180 | 185 |
| 181 individual_test_timings = sorted(run_results.results_by_name.values(), key=lambda result: result.test_run_time, reverse=True) | 186 individual_test_timings = sorted( |
| | 187 run_results.results_by_name.values(), |
| | 188 key=lambda result: result.test_run_time, |
| | 189 reverse=True) |
| 182 num_printed = 0 | 190 num_printed = 0 |
| 183 slow_tests = [] | 191 slow_tests = [] |
| 184 timeout_or_crash_tests = [] | 192 timeout_or_crash_tests = [] |
| 185 unexpected_slow_tests = [] | 193 unexpected_slow_tests = [] |
| 186 for test_tuple in individual_test_timings: | 194 for test_tuple in individual_test_timings: |
| 187 test_name = test_tuple.test_name | 195 test_name = test_tuple.test_name |
| 188 is_timeout_crash_or_slow = False | 196 is_timeout_crash_or_slow = False |
| 189 if test_name in run_results.slow_tests: | 197 if test_name in run_results.slow_tests: |
| 190 is_timeout_crash_or_slow = True | 198 is_timeout_crash_or_slow = True |
| 191 slow_tests.append(test_tuple) | 199 slow_tests.append(test_tuple) |
| 192 | 200 |
| 193 if test_name in run_results.failures_by_name: | 201 if test_name in run_results.failures_by_name: |
| 194 result = run_results.results_by_name[test_name].type | 202 result = run_results.results_by_name[test_name].type |
| 195 if (result == test_expectations.TIMEOUT or | 203 if (result == test_expectations.TIMEOUT or |
| 196 result == test_expectations.CRASH): | 204 result == test_expectations.CRASH): |
| 197 is_timeout_crash_or_slow = True | 205 is_timeout_crash_or_slow = True |
| 198 timeout_or_crash_tests.append(test_tuple) | 206 timeout_or_crash_tests.append(test_tuple) |
| 199 | 207 |
| 200 if (not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG): | 208 if (not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG): |
| 201 num_printed = num_printed + 1 | 209 num_printed = num_printed + 1 |
| 202 unexpected_slow_tests.append(test_tuple) | 210 unexpected_slow_tests.append(test_tuple) |
| 203 | 211 |
| 204 self._print_debug("") | 212 self._print_debug('') |
| 205 if unexpected_slow_tests: | 213 if unexpected_slow_tests: |
| 206 self._print_test_list_timing("%s slowest tests that are not marked as SLOW and did not timeout/crash:" % | 214 self._print_test_list_timing('%s slowest tests that are not marked as SLOW and did not timeout/crash:' % |
| 207 NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests) | 215 NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests) |
| 208 self._print_debug("") | 216 self._print_debug('') |
| 209 | 217 |
| 210 if slow_tests: | 218 if slow_tests: |
| 211 self._print_test_list_timing("Tests marked as SLOW:", slow_tests) | 219 self._print_test_list_timing('Tests marked as SLOW:', slow_tests) |
| 212 self._print_debug("") | 220 self._print_debug('') |
| 213 | 221 |
| 214 if timeout_or_crash_tests: | 222 if timeout_or_crash_tests: |
| 215 self._print_test_list_timing("Tests that timed out or crashed:", timeout_or_crash_tests) | 223 self._print_test_list_timing('Tests that timed out or crashed:', timeout_or_crash_tests) |
| 216 self._print_debug("") | 224 self._print_debug('') |
| 217 | 225 |
| 218 def _print_test_list_timing(self, title, test_list): | 226 def _print_test_list_timing(self, title, test_list): |
| 219 self._print_debug(title) | 227 self._print_debug(title) |
| 220 for test_tuple in test_list: | 228 for test_tuple in test_list: |
| 221 test_run_time = round(test_tuple.test_run_time, 1) | 229 test_run_time = round(test_tuple.test_run_time, 1) |
| 222 self._print_debug(" %s took %s seconds" % (test_tuple.test_name, test_run_time)) | 230 self._print_debug(' %s took %s seconds' % (test_tuple.test_name, test_run_time)) |
| 223 | 231 |
| 224 def _print_directory_timings(self, run_results): | 232 def _print_directory_timings(self, run_results): |
| 225 stats = {} | 233 stats = {} |
| 226 for result in run_results.results_by_name.values(): | 234 for result in run_results.results_by_name.values(): |
| 227 stats.setdefault(result.shard_name, {'num_tests': 0, 'total_time': 0}) | 235 stats.setdefault(result.shard_name, {'num_tests': 0, 'total_time': 0}) |
| 228 stats[result.shard_name]['num_tests'] += 1 | 236 stats[result.shard_name]['num_tests'] += 1 |
| 229 stats[result.shard_name]['total_time'] += result.total_run_time | 237 stats[result.shard_name]['total_time'] += result.total_run_time |
| 230 | 238 |
| 231 min_seconds_to_print = 15 | 239 min_seconds_to_print = 15 |
| 232 | 240 |
| 233 timings = [] | 241 timings = [] |
| 234 for directory in stats: | 242 for directory in stats: |
| 235 rounded_time = round(stats[directory]['total_time'], 1) | 243 rounded_time = round(stats[directory]['total_time'], 1) |
| 236 if rounded_time > min_seconds_to_print: | 244 if rounded_time > min_seconds_to_print: |
| 237 timings.append((directory, rounded_time, stats[directory]['num_tests'])) | 245 timings.append((directory, rounded_time, stats[directory]['num_tests'])) |
| 238 | 246 |
| 239 if not timings: | 247 if not timings: |
| 240 return | 248 return |
| 241 | 249 |
| 242 timings.sort() | 250 timings.sort() |
| 243 | 251 |
| 244 self._print_debug("Time to process slowest subdirectories:") | 252 self._print_debug('Time to process slowest subdirectories:') |
| 245 for timing in timings: | 253 for timing in timings: |
| 246 self._print_debug(" %s took %s seconds to run %s tests." % timing) | 254 self._print_debug(' %s took %s seconds to run %s tests.' % timing) |
| 247 self._print_debug("") | 255 self._print_debug('') |
| 248 | 256 |
| 249 def _print_statistics_for_test_timings(self, title, timings): | 257 def _print_statistics_for_test_timings(self, title, timings): |
| 250 self._print_debug(title) | 258 self._print_debug(title) |
| 251 timings.sort() | 259 timings.sort() |
| 252 | 260 |
| 253 num_tests = len(timings) | 261 num_tests = len(timings) |
| 254 if not num_tests: | 262 if not num_tests: |
| 255 return | 263 return |
| 256 percentile90 = timings[int(.9 * num_tests)] | 264 percentile90 = timings[int(.9 * num_tests)] |
| 257 percentile99 = timings[int(.99 * num_tests)] | 265 percentile99 = timings[int(.99 * num_tests)] |
| 258 | 266 |
| 259 if num_tests % 2 == 1: | 267 if num_tests % 2 == 1: |
| 260 median = timings[((num_tests - 1) / 2) - 1] | 268 median = timings[((num_tests - 1) / 2) - 1] |
| 261 else: | 269 else: |
| 262 lower = timings[num_tests / 2 - 1] | 270 lower = timings[num_tests / 2 - 1] |
| 263 upper = timings[num_tests / 2] | 271 upper = timings[num_tests / 2] |
| 264 median = (float(lower + upper)) / 2 | 272 median = (float(lower + upper)) / 2 |
| 265 | 273 |
| 266 mean = sum(timings) / num_tests | 274 mean = sum(timings) / num_tests |
| 267 | 275 |
| 268 for timing in timings: | 276 for timing in timings: |
| 269 sum_of_deviations = math.pow(timing - mean, 2) | 277 sum_of_deviations = math.pow(timing - mean, 2) |
| 270 | 278 |
| 271 std_deviation = math.sqrt(sum_of_deviations / num_tests) | 279 std_deviation = math.sqrt(sum_of_deviations / num_tests) |
| 272 self._print_debug(" Median: %6.3f" % median) | 280 self._print_debug(' Median: %6.3f' % median) |
| 273 self._print_debug(" Mean: %6.3f" % mean) | 281 self._print_debug(' Mean: %6.3f' % mean) |
| 274 self._print_debug(" 90th percentile: %6.3f" % percentile90) | 282 self._print_debug(' 90th percentile: %6.3f' % percentile90) |
| 275 self._print_debug(" 99th percentile: %6.3f" % percentile99) | 283 self._print_debug(' 99th percentile: %6.3f' % percentile99) |
| 276 self._print_debug(" Standard dev: %6.3f" % std_deviation) | 284 self._print_debug(' Standard dev: %6.3f' % std_deviation) |
| 277 self._print_debug("") | 285 self._print_debug('') |
| 278 | 286 |
| 279 def _print_one_line_summary(self, total_time, run_results): | 287 def _print_one_line_summary(self, total_time, run_results): |
| 280 if self._options.timing: | 288 if self._options.timing: |
| 281 parallel_time = sum(result.total_run_time for result in run_results.results_by_name.values()) | 289 parallel_time = sum(result.total_run_time for result in run_results.results_by_name.values()) |
| 282 | 290 |
| 283 # There is serial overhead in layout_test_runner.run() that we can't easily account for when | 291 # There is serial overhead in layout_test_runner.run() that we can't easily account for when |
| 284 # really running in parallel, but taking the min() ensures that in the worst case | 292 # really running in parallel, but taking the min() ensures that in the worst case |
| 285 # (if parallel time is less than run_time) we do account for it. | 293 # (if parallel time is less than run_time) we do account for it. |
| 286 serial_time = total_time - min(run_results.run_time, parallel_time) | 294 serial_time = total_time - min(run_results.run_time, parallel_time) |
| 287 | 295 |
| 288 speedup = (parallel_time + serial_time) / total_time | 296 speedup = (parallel_time + serial_time) / total_time |
| 289 timing_summary = ' in %.2fs (%.2fs in rwt, %.2gx)' % (total_time, serial_time, speedup) | 297 timing_summary = ' in %.2fs (%.2fs in rwt, %.2gx)' % (total_time, serial_time, speedup) |
| 290 else: | 298 else: |
| 291 timing_summary = '' | 299 timing_summary = '' |
| 292 | 300 |
| 293 total = run_results.total - run_results.expected_skips | 301 total = run_results.total - run_results.expected_skips |
| 294 expected = run_results.expected - run_results.expected_skips | 302 expected = run_results.expected - run_results.expected_skips |
| 295 unexpected = run_results.unexpected | 303 unexpected = run_results.unexpected |
| 296 incomplete = total - expected - unexpected | 304 incomplete = total - expected - unexpected |
| 297 incomplete_str = '' | 305 incomplete_str = '' |
| 298 if incomplete: | 306 if incomplete: |
| 299 self._print_default("") | 307 self._print_default('') |
| 300 incomplete_str = " (%d didn't run)" % incomplete | 308 incomplete_str = " (%d didn't run)" % incomplete |
| 301 | 309 |
| 302 if self._options.verbose or self._options.debug_rwt_logging or unexpected: | 310 if self._options.verbose or self._options.debug_rwt_logging or unexpected: |
| 303 self.writeln("") | 311 self.writeln('') |
| 304 | 312 |
| 305 expected_summary_str = '' | 313 expected_summary_str = '' |
| 306 if run_results.expected_failures > 0: | 314 if run_results.expected_failures > 0: |
| 307 expected_summary_str = " (%d passed, %d didn't)" % (expected - run_results.expected_failures, run_results.expected_failures) | 315 expected_summary_str = " (%d passed, %d didn't)" % ( |
| | 316 expected - run_results.expected_failures, run_results.expected_failures) |
| 308 | 317 |
| 309 summary = '' | 318 summary = '' |
| 310 if unexpected == 0: | 319 if unexpected == 0: |
| 311 if expected == total: | 320 if expected == total: |
| 312 if expected > 1: | 321 if expected > 1: |
| 313 summary = "All %d tests ran as expected%s%s." % (expected, e
xpected_summary_str, timing_summary) | 322 summary = 'All %d tests ran as expected%s%s.' % (expected, e
xpected_summary_str, timing_summary) |
| 314 else: | 323 else: |
| 315 summary = "The test ran as expected%s%s." % (expected_summar
y_str, timing_summary) | 324 summary = 'The test ran as expected%s%s.' % (expected_summar
y_str, timing_summary) |
| 316 else: | 325 else: |
| 317 summary = "%s ran as expected%s%s%s." % (grammar.pluralize('test
', expected), expected_summary_str, incomplete_str, timing_summary) | 326 summary = '%s ran as expected%s%s%s.' % (grammar.pluralize( |
| 327 'test', |
| 328 expected), |
| 329 expected_summary_str, |
| 330 incomplete_str, |
| 331 timing_summary) |
| 318 else: | 332 else: |
| 319 summary = "%s ran as expected%s, %d didn't%s%s:" % (grammar.pluraliz
e('test', expected), expected_summary_str, unexpected, incomplete_str, timing_su
mmary) | 333 summary = "%s ran as expected%s, %d didn't%s%s:" % (grammar.pluraliz
e( |
| 334 'test', |
| 335 expected), |
| 336 expected_summary_str, |
| 337 unexpected, |
| 338 incomplete_str, |
| 339 timing_summary) |
| 320 | 340 |
| 321 self._print_quiet(summary) | 341 self._print_quiet(summary) |
| 322 self._print_quiet("") | 342 self._print_quiet('') |
| 323 | 343 |
| 324 def _test_status_line(self, test_name, suffix): | 344 def _test_status_line(self, test_name, suffix): |
| 325 format_string = '[%d/%d] %s%s' | 345 format_string = '[%d/%d] %s%s' |
| 326 status_line = format_string % (self.num_completed, self.num_tests, test_name, suffix) | 346 status_line = format_string % (self.num_completed, self.num_tests, test_name, suffix) |
| 327 if len(status_line) > self._meter.number_of_columns(): | 347 if len(status_line) > self._meter.number_of_columns(): |
| 328 overflow_columns = len(status_line) - self._meter.number_of_columns() | 348 overflow_columns = len(status_line) - self._meter.number_of_columns() |
| 329 ellipsis = '...' | 349 ellipsis = '...' |
| 330 if len(test_name) < overflow_columns + len(ellipsis) + 2: | 350 if len(test_name) < overflow_columns + len(ellipsis) + 2: |
| 331 # We don't have enough space even if we elide, just show the test filename. | 351 # We don't have enough space even if we elide, just show the test filename. |
| 332 fs = self._port.host.filesystem | 352 fs = self._port.host.filesystem |
| (...skipping 57 matching lines...) |
| 390 args = ' '.join(self._port.lookup_virtual_test_args(test_name)) | 410 args = ' '.join(self._port.lookup_virtual_test_args(test_name)) |
| 391 self._print_default(' base: %s' % base) | 411 self._print_default(' base: %s' % base) |
| 392 self._print_default(' args: %s' % args) | 412 self._print_default(' args: %s' % args) |
| 393 | 413 |
| 394 references = self._port.reference_files(test_name) | 414 references = self._port.reference_files(test_name) |
| 395 if references: | 415 if references: |
| 396 for _, filename in references: | 416 for _, filename in references: |
| 397 self._print_default(' ref: %s' % self._port.relative_test_filename(filename)) | 417 self._print_default(' ref: %s' % self._port.relative_test_filename(filename)) |
| 398 else: | 418 else: |
| 399 for extension in ('.txt', '.png', '.wav'): | 419 for extension in ('.txt', '.png', '.wav'): |
| 400 self._print_baseline(test_name, extension) | 420 self._print_baseline(test_name, extension) |
| 401 | 421 |
| 402 self._print_default(' exp: %s' % exp_str) | 422 self._print_default(' exp: %s' % exp_str) |
| 403 self._print_default(' got: %s' % got_str) | 423 self._print_default(' got: %s' % got_str) |
| 404 self._print_default(' took: %-.3f' % result.test_run_time) | 424 self._print_default(' took: %-.3f' % result.test_run_time) |
| 405 self._print_default('') | 425 self._print_default('') |
| 406 | 426 |
| 407 def _print_baseline(self, test_name, extension): | 427 def _print_baseline(self, test_name, extension): |
| 408 baseline = self._port.expected_filename(test_name, extension) | 428 baseline = self._port.expected_filename(test_name, extension) |
| 409 if self._port._filesystem.exists(baseline): | 429 if self._port._filesystem.exists(baseline): |
| 410 relpath = self._port.relative_test_filename(baseline) | 430 relpath = self._port.relative_test_filename(baseline) |
| (...skipping 16 matching lines...) |
| 427 self._meter.write_throttled_update(msg) | 447 self._meter.write_throttled_update(msg) |
| 428 | 448 |
| 429 def write_update(self, msg): | 449 def write_update(self, msg): |
| 430 self._meter.write_update(msg) | 450 self._meter.write_update(msg) |
| 431 | 451 |
| 432 def writeln(self, msg): | 452 def writeln(self, msg): |
| 433 self._meter.writeln(msg) | 453 self._meter.writeln(msg) |
| 434 | 454 |
| 435 def flush(self): | 455 def flush(self): |
| 436 self._meter.flush() | 456 self._meter.flush() |
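Reviewer note: the only non-mechanical arithmetic touched by this change is the timing summary in `_print_one_line_summary`. Below is a minimal sketch of that math with invented numbers; every value and the surrounding setup is hypothetical and only illustrates how the lines in the diff combine, it is not part of the change itself.

```python
# Hypothetical walkthrough of the speedup math in _print_one_line_summary;
# the numbers below are invented for illustration only.
parallel_time = 120.0  # sum of per-test driver times across all workers
run_time = 45.0        # time spent inside layout_test_runner.run()
total_time = 50.0      # wall-clock time for the whole run

# Serial overhead is whatever wall-clock time the smaller of run_time and
# parallel_time does not cover, matching the min() used in the method.
serial_time = total_time - min(run_time, parallel_time)  # 50 - 45 = 5.0
speedup = (parallel_time + serial_time) / total_time     # (120 + 5) / 50 = 2.5

print(' in %.2fs (%.2fs in rwt, %.2gx)' % (total_time, serial_time, speedup))
# -> ' in 50.00s (5.00s in rwt, 2.5x)'
```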