Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 """Performance Test Bisect Tool | 6 """Performance Test Bisect Tool |
| 7 | 7 |
| 8 This script bisects a series of changelists using binary search. It starts at | 8 This script bisects a series of changelists using binary search. It starts at |
| 9 a bad revision where a performance metric has regressed, and asks for a last | 9 a bad revision where a performance metric has regressed, and asks for a last |
| 10 known-good revision. It will then binary search across this revision range by | 10 known-good revision. It will then binary search across this revision range by |
| (...skipping 159 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 170 # once build is produced, it reads SHA value from this file and appends it | 170 # once build is produced, it reads SHA value from this file and appends it |
| 171 # to build archive filename. | 171 # to build archive filename. |
| 172 DEPS_SHA_PATCH = """diff --git src/DEPS.sha src/DEPS.sha | 172 DEPS_SHA_PATCH = """diff --git src/DEPS.sha src/DEPS.sha |
| 173 new file mode 100644 | 173 new file mode 100644 |
| 174 --- /dev/null | 174 --- /dev/null |
| 175 +++ src/DEPS.sha | 175 +++ src/DEPS.sha |
| 176 @@ -0,0 +1 @@ | 176 @@ -0,0 +1 @@ |
| 177 +%(deps_sha)s | 177 +%(deps_sha)s |
| 178 """ | 178 """ |
| 179 | 179 |
| 180 BISECT_MODE_MEAN = 'mean' | |
| 181 BISECT_MODE_STD_DEV = 'std_dev' | |
| 182 BISECT_MODE_RETURN_CODE = 'return_code' | |
|
qyearsley
2014/04/25 23:53:29
Could add a comment about what these three constan
| |
| 183 | |
| 184 | |
| 180 def _AddAdditionalDepotInfo(depot_info): | 185 def _AddAdditionalDepotInfo(depot_info): |
| 181 """Adds additional depot info to the global depot variables.""" | 186 """Adds additional depot info to the global depot variables.""" |
| 182 global DEPOT_DEPS_NAME | 187 global DEPOT_DEPS_NAME |
| 183 global DEPOT_NAMES | 188 global DEPOT_NAMES |
| 184 DEPOT_DEPS_NAME = dict(DEPOT_DEPS_NAME.items() + | 189 DEPOT_DEPS_NAME = dict(DEPOT_DEPS_NAME.items() + |
| 185 depot_info.items()) | 190 depot_info.items()) |
| 186 DEPOT_NAMES = DEPOT_DEPS_NAME.keys() | 191 DEPOT_NAMES = DEPOT_DEPS_NAME.keys() |
| 187 | 192 |
| 188 | 193 |
| 189 def CalculateTruncatedMean(data_set, truncate_percent): | 194 def CalculateTruncatedMean(data_set, truncate_percent): |
| (...skipping 1771 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1961 path_to_generate = os.path.join('tools', 'perf', 'generate_profile') | 1966 path_to_generate = os.path.join('tools', 'perf', 'generate_profile') |
| 1962 | 1967 |
| 1963 if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'): | 1968 if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'): |
| 1964 profile_path, profile_type = os.path.split(arg_dict['--profile-dir']) | 1969 profile_path, profile_type = os.path.split(arg_dict['--profile-dir']) |
| 1965 return not RunProcess(['python', path_to_generate, | 1970 return not RunProcess(['python', path_to_generate, |
| 1966 '--profile-type-to-generate', profile_type, | 1971 '--profile-type-to-generate', profile_type, |
| 1967 '--browser', arg_dict['--browser'], '--output-dir', profile_path]) | 1972 '--browser', arg_dict['--browser'], '--output-dir', profile_path]) |
| 1968 return False | 1973 return False |
| 1969 return True | 1974 return True |
| 1970 | 1975 |
| 1976 def _IsBisectModeUsingMetric(self): | |
| 1977 return self.opts.bisect_mode in [BISECT_MODE_MEAN, BISECT_MODE_STD_DEV] | |
| 1978 | |
| 1979 def _IsBisectModeReturnCode(self): | |
| 1980 return self.opts.bisect_mode in [BISECT_MODE_RETURN_CODE] | |
| 1981 | |
| 1982 def _IsBisectModeStandardDeviation(self): | |
| 1983 return self.opts.bisect_mode in [BISECT_MODE_STD_DEV] | |
| 1984 | |
| 1971 def RunPerformanceTestAndParseResults( | 1985 def RunPerformanceTestAndParseResults( |
| 1972 self, command_to_run, metric, reset_on_first_run=False, | 1986 self, command_to_run, metric, reset_on_first_run=False, |
| 1973 upload_on_last_run=False, results_label=None): | 1987 upload_on_last_run=False, results_label=None): |
| 1974 """Runs a performance test on the current revision and parses the results. | 1988 """Runs a performance test on the current revision and parses the results. |
| 1975 | 1989 |
| 1976 Args: | 1990 Args: |
| 1977 command_to_run: The command to be run to execute the performance test. | 1991 command_to_run: The command to be run to execute the performance test. |
| 1978 metric: The metric to parse out from the results of the performance test. | 1992 metric: The metric to parse out from the results of the performance test. |
| 1979 This is the result chart name and trace name, separated by slash. | 1993 This is the result chart name and trace name, separated by slash. |
| 1980 reset_on_first_run: If True, pass the flag --reset-results on first run. | 1994 reset_on_first_run: If True, pass the flag --reset-results on first run. |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2015 if self.opts.target_platform == 'cros' and is_telemetry: | 2029 if self.opts.target_platform == 'cros' and is_telemetry: |
| 2016 args.append('--remote=%s' % self.opts.cros_remote_ip) | 2030 args.append('--remote=%s' % self.opts.cros_remote_ip) |
| 2017 args.append('--identity=%s' % CROS_TEST_KEY_PATH) | 2031 args.append('--identity=%s' % CROS_TEST_KEY_PATH) |
| 2018 | 2032 |
| 2019 start_time = time.time() | 2033 start_time = time.time() |
| 2020 | 2034 |
| 2021 metric_values = [] | 2035 metric_values = [] |
| 2022 output_of_all_runs = '' | 2036 output_of_all_runs = '' |
| 2023 for i in xrange(self.opts.repeat_test_count): | 2037 for i in xrange(self.opts.repeat_test_count): |
| 2024 # Can ignore the return code since if the tests fail, it won't return 0. | 2038 # Can ignore the return code since if the tests fail, it won't return 0. |
| 2039 current_args = copy.copy(args) | |
| 2040 if is_telemetry: | |
| 2041 if i == 0 and reset_on_first_run: | |
| 2042 current_args.append('--reset-results') | |
| 2043 elif i == self.opts.repeat_test_count - 1 and upload_on_last_run: | |
| 2044 current_args.append('--upload-results') | |
| 2045 if results_label: | |
| 2046 current_args.append('--results-label=%s' % results_label) | |
| 2025 try: | 2047 try: |
| 2026 current_args = copy.copy(args) | |
| 2027 if is_telemetry: | |
| 2028 if i == 0 and reset_on_first_run: | |
| 2029 current_args.append('--reset-results') | |
| 2030 elif i == self.opts.repeat_test_count - 1 and upload_on_last_run: | |
| 2031 current_args.append('--upload-results') | |
| 2032 if results_label: | |
| 2033 current_args.append('--results-label=%s' % results_label) | |
| 2034 (output, return_code) = RunProcessAndRetrieveOutput(current_args, | 2048 (output, return_code) = RunProcessAndRetrieveOutput(current_args, |
| 2035 cwd=self.src_cwd) | 2049 cwd=self.src_cwd) |
| 2036 except OSError, e: | 2050 except OSError, e: |
| 2037 if e.errno == errno.ENOENT: | 2051 if e.errno == errno.ENOENT: |
| 2038 err_text = ('Something went wrong running the performance test. ' | 2052 err_text = ('Something went wrong running the performance test. ' |
| 2039 'Please review the command line:\n\n') | 2053 'Please review the command line:\n\n') |
| 2040 if 'src/' in ' '.join(args): | 2054 if 'src/' in ' '.join(args): |
| 2041 err_text += ('Check that you haven\'t accidentally specified a ' | 2055 err_text += ('Check that you haven\'t accidentally specified a ' |
| 2042 'path with src/ in the command.\n\n') | 2056 'path with src/ in the command.\n\n') |
| 2043 err_text += ' '.join(args) | 2057 err_text += ' '.join(args) |
| 2044 err_text += '\n' | 2058 err_text += '\n' |
| 2045 | 2059 |
| 2046 return (err_text, failure_code) | 2060 return (err_text, failure_code) |
| 2047 raise | 2061 raise |
| 2048 | 2062 |
| 2049 output_of_all_runs += output | 2063 output_of_all_runs += output |
| 2050 if self.opts.output_buildbot_annotations: | 2064 if self.opts.output_buildbot_annotations: |
| 2051 print output | 2065 print output |
| 2052 | 2066 |
| 2053 metric_values += self.ParseMetricValuesFromOutput(metric, output) | 2067 if self._IsBisectModeUsingMetric(): |
| 2068 metric_values += self.ParseMetricValuesFromOutput(metric, output) | |
| 2069 # If we're bisecting on a metric (ie, changes in the mean or | |
| 2070 # standard deviation) and no metric values are produced, bail out. | |
| 2071 if not metric_values: | |
| 2072 break | |
| 2073 elif self._IsBisectModeReturnCode(): | |
| 2074 metric_values.append(return_code) | |
| 2054 | 2075 |
| 2055 elapsed_minutes = (time.time() - start_time) / 60.0 | 2076 elapsed_minutes = (time.time() - start_time) / 60.0 |
| 2056 | 2077 if elapsed_minutes >= self.opts.max_time_minutes: |
| 2057 if elapsed_minutes >= self.opts.max_time_minutes or not metric_values: | |
| 2058 break | 2078 break |
| 2059 | 2079 |
| 2060 if len(metric_values) == 0: | 2080 if len(metric_values) == 0: |
| 2061 err_text = 'Metric %s was not found in the test output.' % metric | 2081 err_text = 'Metric %s was not found in the test output.' % metric |
| 2062 # TODO(qyearsley): Consider also getting and displaying a list of metrics | 2082 # TODO(qyearsley): Consider also getting and displaying a list of metrics |
| 2063 # that were found in the output here. | 2083 # that were found in the output here. |
| 2064 return (err_text, failure_code, output_of_all_runs) | 2084 return (err_text, failure_code, output_of_all_runs) |
| 2065 | 2085 |
| 2066 # Need to get the average value if there were multiple values. | 2086 # If we're bisecting on return codes, we're really just looking for zero vs |
| 2067 truncated_mean = CalculateTruncatedMean(metric_values, | 2087 # non-zero. |
| 2068 self.opts.truncate_percent) | 2088 if self._IsBisectModeReturnCode(): |
| 2069 standard_err = CalculateStandardError(metric_values) | 2089 # If any of the return codes is non-zero, output 1. |
| 2070 standard_dev = CalculateStandardDeviation(metric_values) | 2090 overall_return_code = 0 if ( |
| 2091 all(current_value == 0 for current_value in metric_values)) else 1 | |
| 2071 | 2092 |
| 2072 values = { | 2093 values = { |
| 2073 'mean': truncated_mean, | 2094 'mean': overall_return_code, |
| 2074 'std_err': standard_err, | 2095 'std_err': 0.0, |
| 2075 'std_dev': standard_dev, | 2096 'std_dev': 0.0, |
| 2076 'values': metric_values, | 2097 'values': metric_values, |
| 2077 } | 2098 } |
|
qyearsley
2014/04/25 23:53:29
It's potentially confusing that "mean" could be ov
| |
| 2078 | 2099 |
| 2079 print 'Results of performance test: %12f %12f' % ( | 2100 print 'Results of performance test: Command returned with %d' % ( |
| 2080 truncated_mean, standard_err) | 2101 overall_return_code) |
| 2081 print | 2102 print |
| 2103 else: | |
| 2104 # Need to get the average value if there were multiple values. | |
| 2105 truncated_mean = CalculateTruncatedMean(metric_values, | |
| 2106 self.opts.truncate_percent) | |
| 2107 standard_err = CalculateStandardError(metric_values) | |
| 2108 standard_dev = CalculateStandardDeviation(metric_values) | |
| 2109 | |
| 2110 if self._IsBisectModeStandardDeviation(): | |
| 2111 metric_values = [standard_dev] | |
| 2112 | |
| 2113 values = { | |
| 2114 'mean': truncated_mean, | |
| 2115 'std_err': standard_err, | |
| 2116 'std_dev': standard_dev, | |
| 2117 'values': metric_values, | |
| 2118 } | |
| 2119 | |
| 2120 print 'Results of performance test: %12f %12f' % ( | |
| 2121 truncated_mean, standard_err) | |
| 2122 print | |
| 2082 return (values, success_code, output_of_all_runs) | 2123 return (values, success_code, output_of_all_runs) |
| 2083 | 2124 |
| 2084 def FindAllRevisionsToSync(self, revision, depot): | 2125 def FindAllRevisionsToSync(self, revision, depot): |
| 2085 """Finds all dependent revisions and depots that need to be synced for a | 2126 """Finds all dependent revisions and depots that need to be synced for a |
| 2086 given revision. This is only useful in the git workflow, as an svn depot | 2127 given revision. This is only useful in the git workflow, as an svn depot |
| 2087 may be split into multiple mirrors. | 2128 may be split into multiple mirrors. |
| 2088 | 2129 |
| 2089 ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and | 2130 ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and |
| 2090 skia/include. To sync skia/src properly, one has to find the proper | 2131 skia/include. To sync skia/src properly, one has to find the proper |
| 2091 revisions in skia/gyp and skia/include. | 2132 revisions in skia/gyp and skia/include. |
| (...skipping 240 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2332 return results | 2373 return results |
| 2333 else: | 2374 else: |
| 2334 return ('Failed to build revision: [%s]' % (str(revision, )), | 2375 return ('Failed to build revision: [%s]' % (str(revision, )), |
| 2335 BUILD_RESULT_FAIL) | 2376 BUILD_RESULT_FAIL) |
| 2336 else: | 2377 else: |
| 2337 return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL) | 2378 return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL) |
| 2338 else: | 2379 else: |
| 2339 return ('Failed to sync revision: [%s]' % (str(revision, )), | 2380 return ('Failed to sync revision: [%s]' % (str(revision, )), |
| 2340 BUILD_RESULT_FAIL) | 2381 BUILD_RESULT_FAIL) |
| 2341 | 2382 |
| 2342 def CheckIfRunPassed(self, current_value, known_good_value, known_bad_value): | 2383 def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value): |
| 2343 """Given known good and bad values, decide if the current_value passed | 2384 """Given known good and bad values, decide if the current_value passed |
| 2344 or failed. | 2385 or failed. |
| 2345 | 2386 |
| 2346 Args: | 2387 Args: |
| 2347 current_value: The value of the metric being checked. | 2388 current_value: The value of the metric being checked. |
| 2348 known_bad_value: The reference value for a "failed" run. | 2389 known_bad_value: The reference value for a "failed" run. |
| 2349 known_good_value: The reference value for a "passed" run. | 2390 known_good_value: The reference value for a "passed" run. |
| 2350 | 2391 |
| 2351 Returns: | 2392 Returns: |
| 2352 True if the current_value is closer to the known_good_value than the | 2393 True if the current_value is closer to the known_good_value than the |
| 2353 known_bad_value. | 2394 known_bad_value. |
| 2354 """ | 2395 """ |
| 2355 dist_to_good_value = abs(current_value['mean'] - known_good_value['mean']) | 2396 if self.opts.bisect_mode == BISECT_MODE_STD_DEV: |
| 2356 dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean']) | 2397 dist_to_good_value = abs(current_value['std_dev'] - |
| 2398 known_good_value['std_dev']) | |
| 2399 dist_to_bad_value = abs(current_value['std_dev'] - | |
| 2400 known_bad_value['std_dev']) | |
| 2401 else: | |
| 2402 dist_to_good_value = abs(current_value['mean'] - known_good_value['mean']) | |
| 2403 dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean']) | |
| 2357 | 2404 |
| 2358 return dist_to_good_value < dist_to_bad_value | 2405 return dist_to_good_value < dist_to_bad_value |
| 2359 | 2406 |
| 2360 def _GetDepotDirectory(self, depot_name): | 2407 def _GetDepotDirectory(self, depot_name): |
| 2361 if depot_name == 'chromium': | 2408 if depot_name == 'chromium': |
| 2362 return self.src_cwd | 2409 return self.src_cwd |
| 2363 elif depot_name == 'cros': | 2410 elif depot_name == 'cros': |
| 2364 return self.cros_cwd | 2411 return self.cros_cwd |
| 2365 elif depot_name in DEPOT_NAMES: | 2412 elif depot_name in DEPOT_NAMES: |
| 2366 return self.depot_cwd[depot_name] | 2413 return self.depot_cwd[depot_name] |
| (...skipping 535 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2902 metric, skippable=True) | 2949 metric, skippable=True) |
| 2903 | 2950 |
| 2904 # If the build is successful, check whether or not the metric | 2951 # If the build is successful, check whether or not the metric |
| 2905 # had regressed. | 2952 # had regressed. |
| 2906 if not run_results[1]: | 2953 if not run_results[1]: |
| 2907 if len(run_results) > 2: | 2954 if len(run_results) > 2: |
| 2908 next_revision_data['external'] = run_results[2] | 2955 next_revision_data['external'] = run_results[2] |
| 2909 next_revision_data['perf_time'] = run_results[3] | 2956 next_revision_data['perf_time'] = run_results[3] |
| 2910 next_revision_data['build_time'] = run_results[4] | 2957 next_revision_data['build_time'] = run_results[4] |
| 2911 | 2958 |
| 2912 passed_regression = self.CheckIfRunPassed(run_results[0], | 2959 passed_regression = self._CheckIfRunPassed(run_results[0], |
| 2913 known_good_value, | 2960 known_good_value, |
| 2914 known_bad_value) | 2961 known_bad_value) |
| 2915 | 2962 |
| 2916 next_revision_data['passed'] = passed_regression | 2963 next_revision_data['passed'] = passed_regression |
| 2917 next_revision_data['value'] = run_results[0] | 2964 next_revision_data['value'] = run_results[0] |
| 2918 | 2965 |
| 2919 if passed_regression: | 2966 if passed_regression: |
| 2920 max_revision = next_revision_index | 2967 max_revision = next_revision_index |
| 2921 else: | 2968 else: |
| 2922 min_revision = next_revision_index | 2969 min_revision = next_revision_index |
| 2923 else: | 2970 else: |
| 2924 if run_results[1] == BUILD_RESULT_SKIPPED: | 2971 if run_results[1] == BUILD_RESULT_SKIPPED: |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2959 # "Confidence in Bisection Results: 100%" to decide whether or not | 3006 # "Confidence in Bisection Results: 100%" to decide whether or not |
| 2960 # to cc the author(s). If you change this, please update the perf | 3007 # to cc the author(s). If you change this, please update the perf |
| 2961 # dashboard as well. | 3008 # dashboard as well. |
| 2962 print 'Confidence in Bisection Results: %d%%' % results_dict['confidence'] | 3009 print 'Confidence in Bisection Results: %d%%' % results_dict['confidence'] |
| 2963 | 3010 |
| 2964 def _PrintBanner(self, results_dict): | 3011 def _PrintBanner(self, results_dict): |
| 2965 print | 3012 print |
| 2966 print " __o_\___ Aw Snap! We hit a speed bump!" | 3013 print " __o_\___ Aw Snap! We hit a speed bump!" |
| 2967 print "=-O----O-'__.~.___________________________________" | 3014 print "=-O----O-'__.~.___________________________________" |
| 2968 print | 3015 print |
| 2969 print 'Bisect reproduced a %.02f%% (+-%.02f%%) change in the %s metric.' % ( | 3016 if self._IsBisectModeReturnCode(): |
| 2970 results_dict['regression_size'], results_dict['regression_std_err'], | 3017 print ('Bisect reproduced a change in return codes while running the ' |
| 2971 '/'.join(self.opts.metric)) | 3018 'performance test.') |
| 3019 else: | |
| 3020 print ('Bisect reproduced a %.02f%% (+-%.02f%%) change in the ' | |
| 3021 '%s metric.' % (results_dict['regression_size'], | |
| 3022 results_dict['regression_std_err'], '/'.join(self.opts.metric))) | |
| 2972 self._PrintConfidence(results_dict) | 3023 self._PrintConfidence(results_dict) |
| 2973 | 3024 |
| 2974 def _PrintFailedBanner(self, results_dict): | 3025 def _PrintFailedBanner(self, results_dict): |
| 2975 print | 3026 print |
| 2976 print ('Bisect could not reproduce a change in the ' | 3027 if self._IsBisectModeReturnCode(): |
| 2977 '%s/%s metric.' % (self.opts.metric[0], self.opts.metric[1])) | 3028 print 'Bisect could not reproduce a change in the return code.' |
| 3029 else: | |
| 3030 print ('Bisect could not reproduce a change in the ' | |
| 3031 '%s metric.' % '/'.join(self.opts.metric)) | |
| 2978 print | 3032 print |
| 2979 self._PrintConfidence(results_dict) | |
| 2980 | 3033 |
| 2981 def _GetViewVCLinkFromDepotAndHash(self, cl, depot): | 3034 def _GetViewVCLinkFromDepotAndHash(self, cl, depot): |
| 2982 info = self.source_control.QueryRevisionInfo(cl, | 3035 info = self.source_control.QueryRevisionInfo(cl, |
| 2983 self._GetDepotDirectory(depot)) | 3036 self._GetDepotDirectory(depot)) |
| 2984 if depot and DEPOT_DEPS_NAME[depot].has_key('viewvc'): | 3037 if depot and DEPOT_DEPS_NAME[depot].has_key('viewvc'): |
| 2985 try: | 3038 try: |
| 2986 # Format is "git-svn-id: svn://....@123456 <other data>" | 3039 # Format is "git-svn-id: svn://....@123456 <other data>" |
| 2987 svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i] | 3040 svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i] |
| 2988 svn_revision = svn_line[0].split('@') | 3041 svn_revision = svn_line[0].split('@') |
| 2989 svn_revision = svn_revision[1].split(' ')[0] | 3042 svn_revision = svn_revision[1].split(' ')[0] |
| (...skipping 16 matching lines...) Expand all Loading... | |
| 3006 print 'Link : %s' % commit_link | 3059 print 'Link : %s' % commit_link |
| 3007 else: | 3060 else: |
| 3008 print | 3061 print |
| 3009 print 'Failed to parse svn revision from body:' | 3062 print 'Failed to parse svn revision from body:' |
| 3010 print | 3063 print |
| 3011 print info['body'] | 3064 print info['body'] |
| 3012 print | 3065 print |
| 3013 print 'Commit : %s' % cl | 3066 print 'Commit : %s' % cl |
| 3014 print 'Date : %s' % info['date'] | 3067 print 'Date : %s' % info['date'] |
| 3015 | 3068 |
| 3069 def _PrintTable(self, column_widths, row_data): | |
|
tonyg
2014/04/25 23:12:07
Oops, I was thinking this would take all rows inst
shatch
2014/04/25 23:15:10
Done.
| |
| 3070 assert len(column_widths) == len(row_data) | |
| 3071 | |
| 3072 text = '' | |
| 3073 for i in xrange(len(column_widths)): | |
| 3074 current_row_data = row_data[i].center(column_widths[i], ' ') | |
| 3075 text += ('%%%ds' % column_widths[i]) % current_row_data | |
| 3076 print text | |
| 3077 | |
| 3078 def _PrintTestedCommitsHeader(self): | |
| 3079 if self.opts.bisect_mode == BISECT_MODE_MEAN: | |
| 3080 self._PrintTable( | |
| 3081 [20, 70, 14, 12, 13], | |
| 3082 ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State']) | |
| 3083 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV: | |
| 3084 self._PrintTable( | |
| 3085 [20, 70, 14, 12, 13], | |
| 3086 ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State']) | |
| 3087 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE: | |
| 3088 self._PrintTable( | |
| 3089 [20, 70, 14, 13], | |
| 3090 ['Depot', 'Commit SHA', 'Return Code', 'State']) | |
| 3091 else: | |
| 3092 assert False, "Invalid bisect_mode specified." | |
| 3093 print ' %20s %70s %14s %13s' % ('Depot'.center(20, ' '), | |
| 3094 'Commit SHA'.center(70, ' '), 'Return Code'.center(14, ' '), | |
| 3095 'State'.center(13, ' ')) | |
| 3096 | |
| 3097 def _PrintTestedCommitsEntry(self, current_data, cl_link, state_str): | |
| 3098 if self.opts.bisect_mode == BISECT_MODE_MEAN: | |
| 3099 std_error = '+-%.02f' % current_data['value']['std_err'] | |
| 3100 mean = '%.02f' % current_data['value']['mean'] | |
| 3101 self._PrintTable( | |
| 3102 [20, 70, 12, 14, 13], | |
| 3103 [current_data['depot'], cl_link, mean, std_error, state_str]) | |
| 3104 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV: | |
| 3105 std_error = '+-%.02f' % current_data['value']['std_err'] | |
| 3106 mean = '%.02f' % current_data['value']['mean'] | |
| 3107 self._PrintTable( | |
| 3108 [20, 70, 12, 14, 13], | |
| 3109 [current_data['depot'], cl_link, std_error, mean, state_str]) | |
| 3110 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE: | |
| 3111 mean = '%d' % current_data['value']['mean'] | |
| 3112 self._PrintTable( | |
| 3113 [20, 70, 14, 13], | |
| 3114 [current_data['depot'], cl_link, mean, state_str]) | |
| 3115 | |
| 3016 def _PrintTestedCommitsTable(self, revision_data_sorted, | 3116 def _PrintTestedCommitsTable(self, revision_data_sorted, |
| 3017 first_working_revision, last_broken_revision, confidence, | 3117 first_working_revision, last_broken_revision, confidence, |
| 3018 final_step=True): | 3118 final_step=True): |
| 3019 print | 3119 print |
| 3020 if final_step: | 3120 if final_step: |
| 3021 print 'Tested commits:' | 3121 print 'Tested commits:' |
| 3022 else: | 3122 else: |
| 3023 print 'Partial results:' | 3123 print 'Partial results:' |
| 3024 print ' %20s %70s %12s %14s %13s' % ('Depot'.center(20, ' '), | 3124 self._PrintTestedCommitsHeader() |
| 3025 'Commit SHA'.center(70, ' '), 'Mean'.center(12, ' '), | |
| 3026 'Std. Error'.center(14, ' '), 'State'.center(13, ' ')) | |
| 3027 state = 0 | 3125 state = 0 |
| 3028 for current_id, current_data in revision_data_sorted: | 3126 for current_id, current_data in revision_data_sorted: |
| 3029 if current_data['value']: | 3127 if current_data['value']: |
| 3030 if (current_id == last_broken_revision or | 3128 if (current_id == last_broken_revision or |
| 3031 current_id == first_working_revision): | 3129 current_id == first_working_revision): |
| 3032 # If confidence is too low, don't add this empty line since it's | 3130 # If confidence is too low, don't add this empty line since it's |
| 3033 # used to put focus on a suspected CL. | 3131 # used to put focus on a suspected CL. |
| 3034 if confidence and final_step: | 3132 if confidence and final_step: |
| 3035 print | 3133 print |
| 3036 state += 1 | 3134 state += 1 |
| 3037 if state == 2 and not final_step: | 3135 if state == 2 and not final_step: |
| 3038 # Just want a separation between "bad" and "good" cl's. | 3136 # Just want a separation between "bad" and "good" cl's. |
| 3039 print | 3137 print |
| 3040 | 3138 |
| 3041 state_str = 'Bad' | 3139 state_str = 'Bad' |
| 3042 if state == 1 and final_step: | 3140 if state == 1 and final_step: |
| 3043 state_str = 'Suspected CL' | 3141 state_str = 'Suspected CL' |
| 3044 elif state == 2: | 3142 elif state == 2: |
| 3045 state_str = 'Good' | 3143 state_str = 'Good' |
| 3046 | 3144 |
| 3047 # If confidence is too low, don't bother outputting good/bad. | 3145 # If confidence is too low, don't bother outputting good/bad. |
| 3048 if not confidence: | 3146 if not confidence: |
| 3049 state_str = '' | 3147 state_str = '' |
| 3050 state_str = state_str.center(13, ' ') | 3148 state_str = state_str.center(13, ' ') |
| 3051 | 3149 |
| 3052 std_error = ('+-%.02f' % | |
| 3053 current_data['value']['std_err']).center(14, ' ') | |
| 3054 mean = ('%.02f' % current_data['value']['mean']).center(12, ' ') | |
| 3055 cl_link = self._GetViewVCLinkFromDepotAndHash(current_id, | 3150 cl_link = self._GetViewVCLinkFromDepotAndHash(current_id, |
| 3056 current_data['depot']) | 3151 current_data['depot']) |
| 3057 if not cl_link: | 3152 if not cl_link: |
| 3058 cl_link = current_id | 3153 cl_link = current_id |
| 3059 print ' %20s %70s %12s %14s %13s' % ( | 3154 self._PrintTestedCommitsEntry(current_data, cl_link, state_str) |
| 3060 current_data['depot'].center(20, ' '), cl_link.center(70, ' '), | |
| 3061 mean, std_error, state_str) | |
| 3062 | 3155 |
| 3063 def _PrintReproSteps(self): | 3156 def _PrintReproSteps(self): |
| 3064 print | 3157 print |
| 3065 print 'To reproduce locally:' | 3158 print 'To reproduce locally:' |
| 3066 print '$ ' + self.opts.command | 3159 print '$ ' + self.opts.command |
| 3067 if bisect_utils.IsTelemetryCommand(self.opts.command): | 3160 if bisect_utils.IsTelemetryCommand(self.opts.command): |
| 3068 print | 3161 print |
| 3069 print 'Also consider passing --profiler=list to see available profilers.' | 3162 print 'Also consider passing --profiler=list to see available profilers.' |
| 3070 | 3163 |
| 3071 def _PrintOtherRegressions(self, other_regressions, revision_data): | 3164 def _PrintOtherRegressions(self, other_regressions, revision_data): |
| (...skipping 354 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 3426 self.no_custom_deps = False | 3519 self.no_custom_deps = False |
| 3427 self.working_directory = None | 3520 self.working_directory = None |
| 3428 self.extra_src = None | 3521 self.extra_src = None |
| 3429 self.debug_ignore_build = None | 3522 self.debug_ignore_build = None |
| 3430 self.debug_ignore_sync = None | 3523 self.debug_ignore_sync = None |
| 3431 self.debug_ignore_perf_test = None | 3524 self.debug_ignore_perf_test = None |
| 3432 self.gs_bucket = None | 3525 self.gs_bucket = None |
| 3433 self.target_arch = 'ia32' | 3526 self.target_arch = 'ia32' |
| 3434 self.builder_host = None | 3527 self.builder_host = None |
| 3435 self.builder_port = None | 3528 self.builder_port = None |
| 3529 self.bisect_mode = BISECT_MODE_MEAN | |
| 3436 | 3530 |
| 3437 def _CreateCommandLineParser(self): | 3531 def _CreateCommandLineParser(self): |
| 3438 """Creates a parser with bisect options. | 3532 """Creates a parser with bisect options. |
| 3439 | 3533 |
| 3440 Returns: | 3534 Returns: |
| 3441 An instance of optparse.OptionParser. | 3535 An instance of optparse.OptionParser. |
| 3442 """ | 3536 """ |
| 3443 usage = ('%prog [options] [-- chromium-options]\n' | 3537 usage = ('%prog [options] [-- chromium-options]\n' |
| 3444 'Perform binary search on revision history to find a minimal ' | 3538 'Perform binary search on revision history to find a minimal ' |
| 3445 'range of revisions where a performance metric regressed.\n') | 3539 'range of revisions where a performance metric regressed.\n') |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 3480 'doesn\'t exceed --max_time_minutes. Values will be ' | 3574 'doesn\'t exceed --max_time_minutes. Values will be ' |
| 3481 'clamped to range [1, 60].' | 3575 'clamped to range [1, 60].' |
| 3482 'Default value is 20.') | 3576 'Default value is 20.') |
| 3483 group.add_option('-t', '--truncate_percent', | 3577 group.add_option('-t', '--truncate_percent', |
| 3484 type='int', | 3578 type='int', |
| 3485 default=25, | 3579 default=25, |
| 3486 help='The highest/lowest % are discarded to form a ' | 3580 help='The highest/lowest % are discarded to form a ' |
| 3487 'truncated mean. Values will be clamped to range [0, ' | 3581 'truncated mean. Values will be clamped to range [0, ' |
| 3488 '25]. Default value is 25 (highest/lowest 25% will be ' | 3582 '25]. Default value is 25 (highest/lowest 25% will be ' |
| 3489 'discarded).') | 3583 'discarded).') |
| 3584 group.add_option('--bisect_mode', | |
| 3585 type='choice', | |
| 3586 choices=[BISECT_MODE_MEAN, BISECT_MODE_STD_DEV, | |
| 3587 BISECT_MODE_RETURN_CODE], | |
| 3588 default=BISECT_MODE_MEAN, | |
| 3589 help='The bisect mode. Choices are to bisect on the ' | |
| 3590 'difference in mean, std_dev, or return_code.') | |
| 3490 parser.add_option_group(group) | 3591 parser.add_option_group(group) |
| 3491 | 3592 |
| 3492 group = optparse.OptionGroup(parser, 'Build options') | 3593 group = optparse.OptionGroup(parser, 'Build options') |
| 3493 group.add_option('-w', '--working_directory', | 3594 group.add_option('-w', '--working_directory', |
| 3494 type='str', | 3595 type='str', |
| 3495 help='Path to the working directory where the script ' | 3596 help='Path to the working directory where the script ' |
| 3496 'will do an initial checkout of the chromium depot. The ' | 3597 'will do an initial checkout of the chromium depot. The ' |
| 3497 'files will be placed in a subdirectory "bisect" under ' | 3598 'files will be placed in a subdirectory "bisect" under ' |
| 3498 'working_directory and that will be used to perform the ' | 3599 'working_directory and that will be used to perform the ' |
| 3499 'bisection. This parameter is optional, if it is not ' | 3600 'bisection. This parameter is optional, if it is not ' |
| (...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 3579 try: | 3680 try: |
| 3580 if not opts.command: | 3681 if not opts.command: |
| 3581 raise RuntimeError('missing required parameter: --command') | 3682 raise RuntimeError('missing required parameter: --command') |
| 3582 | 3683 |
| 3583 if not opts.good_revision: | 3684 if not opts.good_revision: |
| 3584 raise RuntimeError('missing required parameter: --good_revision') | 3685 raise RuntimeError('missing required parameter: --good_revision') |
| 3585 | 3686 |
| 3586 if not opts.bad_revision: | 3687 if not opts.bad_revision: |
| 3587 raise RuntimeError('missing required parameter: --bad_revision') | 3688 raise RuntimeError('missing required parameter: --bad_revision') |
| 3588 | 3689 |
| 3589 if not opts.metric: | 3690 if not opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE: |
| 3590 raise RuntimeError('missing required parameter: --metric') | 3691 raise RuntimeError('missing required parameter: --metric') |
| 3591 | 3692 |
| 3592 if opts.gs_bucket: | 3693 if opts.gs_bucket: |
| 3593 if not cloud_storage.List(opts.gs_bucket): | 3694 if not cloud_storage.List(opts.gs_bucket): |
| 3594 raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket) | 3695 raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket) |
| 3595 if not opts.builder_host: | 3696 if not opts.builder_host: |
| 3596 raise RuntimeError('Must specify try server hostname, when ' | 3697 raise RuntimeError('Must specify try server hostname, when ' |
| 3597 'gs_bucket is used: --builder_host') | 3698 'gs_bucket is used: --builder_host') |
| 3598 if not opts.builder_port: | 3699 if not opts.builder_port: |
| 3599 raise RuntimeError('Must specify try server port number, when ' | 3700 raise RuntimeError('Must specify try server port number, when ' |
| 3600 'gs_bucket is used: --builder_port') | 3701 'gs_bucket is used: --builder_port') |
| 3601 if opts.target_platform == 'cros': | 3702 if opts.target_platform == 'cros': |
| 3602 # Run sudo up front to make sure credentials are cached for later. | 3703 # Run sudo up front to make sure credentials are cached for later. |
| 3603 print 'Sudo is required to build cros:' | 3704 print 'Sudo is required to build cros:' |
| 3604 print | 3705 print |
| 3605 RunProcess(['sudo', 'true']) | 3706 RunProcess(['sudo', 'true']) |
| 3606 | 3707 |
| 3607 if not opts.cros_board: | 3708 if not opts.cros_board: |
| 3608 raise RuntimeError('missing required parameter: --cros_board') | 3709 raise RuntimeError('missing required parameter: --cros_board') |
| 3609 | 3710 |
| 3610 if not opts.cros_remote_ip: | 3711 if not opts.cros_remote_ip: |
| 3611 raise RuntimeError('missing required parameter: --cros_remote_ip') | 3712 raise RuntimeError('missing required parameter: --cros_remote_ip') |
| 3612 | 3713 |
| 3613 if not opts.working_directory: | 3714 if not opts.working_directory: |
| 3614 raise RuntimeError('missing required parameter: --working_directory') | 3715 raise RuntimeError('missing required parameter: --working_directory') |
| 3615 | 3716 |
| 3616 metric_values = opts.metric.split('/') | 3717 metric_values = opts.metric.split('/') |
| 3617 if len(metric_values) != 2: | 3718 if (len(metric_values) != 2 and |
| 3719 opts.bisect_mode != BISECT_MODE_RETURN_CODE): | |
| 3618 raise RuntimeError("Invalid metric specified: [%s]" % opts.metric) | 3720 raise RuntimeError("Invalid metric specified: [%s]" % opts.metric) |
| 3619 | 3721 |
| 3620 opts.metric = metric_values | 3722 opts.metric = metric_values |
| 3621 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100) | 3723 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100) |
| 3622 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60) | 3724 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60) |
| 3623 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25) | 3725 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25) |
| 3624 opts.truncate_percent = opts.truncate_percent / 100.0 | 3726 opts.truncate_percent = opts.truncate_percent / 100.0 |
| 3625 | 3727 |
| 3626 for k, v in opts.__dict__.iteritems(): | 3728 for k, v in opts.__dict__.iteritems(): |
| 3627 assert hasattr(self, k), "Invalid %s attribute in BisectOptions." % k | 3729 assert hasattr(self, k), "Invalid %s attribute in BisectOptions." % k |
| (...skipping 91 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 3719 # The perf dashboard scrapes the "results" step in order to comment on | 3821 # The perf dashboard scrapes the "results" step in order to comment on |
| 3720 # bugs. If you change this, please update the perf dashboard as well. | 3822 # bugs. If you change this, please update the perf dashboard as well. |
| 3721 bisect_utils.OutputAnnotationStepStart('Results') | 3823 bisect_utils.OutputAnnotationStepStart('Results') |
| 3722 print 'Error: %s' % e.message | 3824 print 'Error: %s' % e.message |
| 3723 if opts.output_buildbot_annotations: | 3825 if opts.output_buildbot_annotations: |
| 3724 bisect_utils.OutputAnnotationStepClosed() | 3826 bisect_utils.OutputAnnotationStepClosed() |
| 3725 return 1 | 3827 return 1 |
| 3726 | 3828 |
| 3727 if __name__ == '__main__': | 3829 if __name__ == '__main__': |
| 3728 sys.exit(main()) | 3830 sys.exit(main()) |
| OLD | NEW |