| OLD | NEW |
| 1 # Copyright (C) 2012 Google Inc. All rights reserved. | 1 # Copyright (C) 2012 Google Inc. All rights reserved. |
| 2 # Copyright (C) 2012 Zoltan Horvath, Adobe Systems Incorporated. All rights reserved. | 2 # Copyright (C) 2012 Zoltan Horvath, Adobe Systems Incorporated. All rights reserved. |
| 3 # | 3 # |
| 4 # Redistribution and use in source and binary forms, with or without | 4 # Redistribution and use in source and binary forms, with or without |
| 5 # modification, are permitted provided that the following conditions are | 5 # modification, are permitted provided that the following conditions are |
| 6 # met: | 6 # met: |
| 7 # | 7 # |
| 8 # * Redistributions of source code must retain the above copyright | 8 # * Redistributions of source code must retain the above copyright |
| 9 # notice, this list of conditions and the following disclaimer. | 9 # notice, this list of conditions and the following disclaimer. |
| 10 # * Redistributions in binary form must reproduce the above | 10 # * Redistributions in binary form must reproduce the above |
| (...skipping 102 matching lines...) |
| 113 for _ in xrange(self._test_runner_count): | 113 for _ in xrange(self._test_runner_count): |
| 114 driver = self._create_driver() | 114 driver = self._create_driver() |
| 115 try: | 115 try: |
| 116 if not self._run_with_driver(driver, time_out_ms): | 116 if not self._run_with_driver(driver, time_out_ms): |
| 117 return None | 117 return None |
| 118 finally: | 118 finally: |
| 119 driver.stop() | 119 driver.stop() |
| 120 | 120 |
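Note: the run loop above (lines 113-119) creates a fresh driver per configured runner and relies on try/finally so the driver is stopped even when a run aborts early. A minimal standalone sketch of that lifecycle pattern, using a stand-in driver class rather than webkitpy's real Driver:

    class StubDriver(object):
        """Stand-in driver; only the create/run/stop lifecycle is illustrated."""
        def run(self):
            return True

        def stop(self):
            print('driver stopped')

    def run_all(runner_count):
        for _ in range(runner_count):
            driver = StubDriver()
            try:
                if not driver.run():
                    return None  # abort the whole run...
            finally:
                driver.stop()    # ...but always release the driver
        return 'ok'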
| 121 should_log = not self._port.get_option('profile') | 121 should_log = not self._port.get_option('profile') |
| 122 if should_log and self._description: | 122 if should_log and self._description: |
| 123 _log.info('DESCRIPTION: %s' % self._description) | 123 _log.info('DESCRIPTION: %s', self._description) |
| 124 | 124 |
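Note: the recurring change in this patch (line 123 above and the other _log calls below) replaces eager %-interpolation with argument passing, so the logging module only formats the message when a handler actually emits it. A minimal illustration with made-up values:

    import logging

    _log = logging.getLogger('perf_test_example')  # illustrative logger name

    name, mean, unit = 'Parser/html5-full-render', 123.4, 'ms'

    # Old style: the string is built even if INFO-level logging is disabled.
    _log.info('RESULT %s= %s %s' % (name, mean, unit))

    # New style: formatting is deferred to the logging framework.
    _log.info('RESULT %s= %s %s', name, mean, unit)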
| 125 results = {} | 125 results = {} |
| 126 for metric_name in self._ordered_metrics_name: | 126 for metric_name in self._ordered_metrics_name: |
| 127 metric = self._metrics[metric_name] | 127 metric = self._metrics[metric_name] |
| 128 results[metric.name()] = metric.grouped_iteration_values() | 128 results[metric.name()] = metric.grouped_iteration_values() |
| 129 if should_log: | 129 if should_log: |
| 130 legacy_chromium_bot_compatible_name = self.test_name_without_file_extension().replace('/', ': ') | 130 legacy_chromium_bot_compatible_name = self.test_name_without_file_extension().replace('/', ': ') |
| 131 self.log_statistics(legacy_chromium_bot_compatible_name + ': ' + metric.name(), | 131 self.log_statistics(legacy_chromium_bot_compatible_name + ': ' + metric.name(), |
| 132 metric.flattened_iteration_values(), metric.unit()) | 132 metric.flattened_iteration_values(), metric.unit()) |
| 133 | 133 |
| (...skipping 10 matching lines...) |
| 144 delta = time - mean | 144 delta = time - mean |
| 145 sweep = i + 1.0 | 145 sweep = i + 1.0 |
| 146 mean += delta / sweep | 146 mean += delta / sweep |
| 147 square_sum += delta * (time - mean) | 147 square_sum += delta * (time - mean) |
| 148 | 148 |
| 149 middle = int(len(sorted_values) / 2) | 149 middle = int(len(sorted_values) / 2) |
| 150 mean = sum(sorted_values) / len(values) | 150 mean = sum(sorted_values) / len(values) |
| 151 median = sorted_values[middle] if len(sorted_values) % 2 else (sorted_values[middle - 1] + sorted_values[middle]) / 2 | 151 median = sorted_values[middle] if len(sorted_values) % 2 else (sorted_values[middle - 1] + sorted_values[middle]) / 2 |
| 152 stdev = math.sqrt(square_sum / (len(sorted_values) - 1)) if len(sorted_values) > 1 else 0 | 152 stdev = math.sqrt(square_sum / (len(sorted_values) - 1)) if len(sorted_values) > 1 else 0 |
| 153 | 153 |
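Note: the loop in log_statistics (lines 144-147) is a single-pass running mean/variance update (Welford's method); the mean is then recomputed from sum() and the median is picked from the sorted list. A standalone sketch of the same arithmetic with illustrative data:

    import math

    values = [10.0, 12.0, 11.0, 13.0, 12.5]  # made-up sample
    sorted_values = sorted(values)

    mean = 0.0
    square_sum = 0.0
    for i, time in enumerate(sorted_values):
        delta = time - mean
        mean += delta / (i + 1.0)
        square_sum += delta * (time - mean)

    middle = len(sorted_values) // 2
    median = sorted_values[middle] if len(sorted_values) % 2 else (sorted_values[middle - 1] + sorted_values[middle]) / 2.0
    stdev = math.sqrt(square_sum / (len(sorted_values) - 1)) if len(sorted_values) > 1 else 0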
| 154 _log.info('RESULT %s= %s %s' % (test_name, mean, unit)) | 154 _log.info('RESULT %s= %s %s', test_name, mean, unit) |
| 155 _log.info('median= %s %s, stdev= %s %s, min= %s %s, max= %s %s' % | 155 _log.info('median= %s %s, stdev= %s %s, min= %s %s, max= %s %s', |
| 156 (median, unit, stdev, unit, sorted_values[0], unit, sorted_values[-1], unit)) | 156 median, unit, stdev, unit, sorted_values[0], unit, sorted_values[-1], unit) |
| 157 | 157 |
| 158 _description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE) | 158 _description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE) |
| 159 _metrics_regex = re.compile(r'^(?P<metric>Time|Malloc|JS Heap):') | 159 _metrics_regex = re.compile(r'^(?P<metric>Time|Malloc|JS Heap):') |
| 160 _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values'] | 160 _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values'] |
| 161 _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)') | 161 _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)') |
| 162 _console_regex = re.compile(r'^CONSOLE (MESSAGE|WARNING):') | 162 _console_regex = re.compile(r'^CONSOLE (MESSAGE|WARNING):') |
| 163 | 163 |
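Note: the class attributes above (lines 158-162) parse the driver's text output; _score_regex in particular accepts one of the statistics keys followed by one or more comma-separated numbers and an optional unit. A quick check against a hypothetical output line:

    import re

    _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values']
    _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)')

    match = _score_regex.match('values 1080, 1120, 1095 ms')  # made-up line
    if match:
        print(match.group('key'))    # values
        print(match.group('value'))  # 1080, 1120, 1095
        print(match.group('unit'))   # ms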
| 164 def _run_with_driver(self, driver, time_out_ms): | 164 def _run_with_driver(self, driver, time_out_ms): |
| 165 output = self.run_single(driver, self.test_path(), time_out_ms) | 165 output = self.run_single(driver, self.test_path(), time_out_ms) |
| 166 self._filter_output(output) | 166 self._filter_output(output) |
| (...skipping 31 matching lines...) |
| 198 self._metrics[metric_name] = PerfTestMetric(metric_name, unit) | 198 self._metrics[metric_name] = PerfTestMetric(metric_name, unit) |
| 199 self._ordered_metrics_name.append(metric_name) | 199 self._ordered_metrics_name.append(metric_name) |
| 200 return self._metrics[metric_name] | 200 return self._metrics[metric_name] |
| 201 | 201 |
| 202 def run_single(self, driver, test_path, time_out_ms, should_run_pixel_test=False): | 202 def run_single(self, driver, test_path, time_out_ms, should_run_pixel_test=False): |
| 203 return driver.run_test( | 203 return driver.run_test( |
| 204 DriverInput(test_path, time_out_ms, image_hash=None, should_run_pixel_test=should_run_pixel_test, args=[]), stop_when_done=False) | 204 DriverInput(test_path, time_out_ms, image_hash=None, should_run_pixel_test=should_run_pixel_test, args=[]), stop_when_done=False) |
| 205 | 205 |
| 206 def run_failed(self, output): | 206 def run_failed(self, output): |
| 207 if output.error: | 207 if output.error: |
| 208 _log.error('error: %s\n%s' % (self.test_name(), output.error)) | 208 _log.error('error: %s\n%s', self.test_name(), output.error) |
| 209 | 209 |
| 210 if output.text is None: | 210 if output.text is None: |
| 211 pass | 211 pass |
| 212 elif output.timeout: | 212 elif output.timeout: |
| 213 _log.error('timeout: %s' % self.test_name()) | 213 _log.error('timeout: %s', self.test_name()) |
| 214 elif output.crash: | 214 elif output.crash: |
| 215 _log.error('crash: %s' % self.test_name()) | 215 _log.error('crash: %s', self.test_name()) |
| 216 else: | 216 else: |
| 217 return False | 217 return False |
| 218 | 218 |
| 219 return True | 219 return True |
| 220 | 220 |
| 221 @staticmethod | 221 @staticmethod |
| 222 def _should_ignore_line(regexps, line): | 222 def _should_ignore_line(regexps, line): |
| 223 if not line: | 223 if not line: |
| 224 return True | 224 return True |
| 225 for regexp in regexps: | 225 for regexp in regexps: |
| (...skipping 84 matching lines...) |
| 310 (re.compile(r'^Dromaeo/'), SingleProcessPerfTest), | 310 (re.compile(r'^Dromaeo/'), SingleProcessPerfTest), |
| 311 (re.compile(r'^inspector/'), ChromiumStylePerfTest), | 311 (re.compile(r'^inspector/'), ChromiumStylePerfTest), |
| 312 ] | 312 ] |
| 313 | 313 |
| 314 @classmethod | 314 @classmethod |
| 315 def create_perf_test(cls, port, test_name, path, test_runner_count=DEFAULT_TEST_RUNNER_COUNT): | 315 def create_perf_test(cls, port, test_name, path, test_runner_count=DEFAULT_TEST_RUNNER_COUNT): |
| 316 for (pattern, test_class) in cls._pattern_map: | 316 for (pattern, test_class) in cls._pattern_map: |
| 317 if pattern.match(test_name): | 317 if pattern.match(test_name): |
| 318 return test_class(port, test_name, path, test_runner_count) | 318 return test_class(port, test_name, path, test_runner_count) |
| 319 return PerfTest(port, test_name, path, test_runner_count) | 319 return PerfTest(port, test_name, path, test_runner_count) |
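Note: create_perf_test picks the first class in _pattern_map whose regex matches the test name and falls back to the generic PerfTest. A simplified sketch of that dispatch with stand-in classes (the real constructors take the port, test name, path and runner count):

    import re

    class PerfTest(object):
        pass

    class SingleProcessPerfTest(PerfTest):
        pass

    class ChromiumStylePerfTest(PerfTest):
        pass

    _pattern_map = [
        (re.compile(r'^Dromaeo/'), SingleProcessPerfTest),
        (re.compile(r'^inspector/'), ChromiumStylePerfTest),
    ]

    def perf_test_class_for(test_name):
        # First matching pattern wins; anything else gets the default class.
        for pattern, test_class in _pattern_map:
            if pattern.match(test_name):
                return test_class
        return PerfTest

    print(perf_test_class_for('inspector/console.html').__name__)  # ChromiumStylePerfTest
    print(perf_test_class_for('Parser/html5.html').__name__)       # PerfTest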