Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # Copyright 2013 The Chromium Authors. All rights reserved. | 2 # Copyright 2013 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 """Parses CSV output from the loading_measurement and outputs interesting stats. | 6 """Parses CSV output from the loading_measurement and outputs interesting stats. |
| 7 | 7 |
| 8 Example usage: | 8 Example usage: |
| 9 $ tools/perf/run_measurement --browser=release \ | 9 $ tools/perf/run_measurement --browser=release \ |
| 10 --output-format=csv --output=/path/to/loading_measurement_output.csv \ | 10 --output-format=csv --output=/path/to/loading_measurement_output.csv \ |
| 11 loading_measurement tools/perf/page_sets/top_1m.json | 11 loading_measurement tools/perf/page_sets/top_1m.json |
| 12 $ tools/perf/perf_tools/loading_measurement_analyzer.py \ | 12 $ tools/perf/measurements/loading_measurement_analyzer.py \ |
| 13 --num-slowest-urls=100 --rank-csv-file=/path/to/top-1m.csv \ | 13 --num-slowest-urls=100 --rank-csv-file=/path/to/top-1m.csv \ |
| 14 /path/to/loading_measurement_output.csv | 14 /path/to/loading_measurement_output.csv |
| 15 """ | 15 """ |
| 16 | 16 |
| 17 import collections | 17 import collections |
| 18 import csv | 18 import csv |
| 19 import heapq | 19 import heapq |
| 20 import optparse | 20 import optparse |
| 21 import os | 21 import os |
| 22 import sys | 22 import sys |
| (...skipping 15 matching lines...) Expand all Loading... | |
| 38 def _ParseInputFile(self, input_file, options): | 38 def _ParseInputFile(self, input_file, options): |
| 39 with open(input_file, 'r') as csvfile: | 39 with open(input_file, 'r') as csvfile: |
| 40 row_dict = csv.DictReader(csvfile) | 40 row_dict = csv.DictReader(csvfile) |
| 41 for row in row_dict: | 41 for row in row_dict: |
| 42 if (options.rank_limit and | 42 if (options.rank_limit and |
| 43 self._GetRank(row['url']) > options.rank_limit): | 43 self._GetRank(row['url']) > options.rank_limit): |
| 44 continue | 44 continue |
| 45 for key, value in row.iteritems(): | 45 for key, value in row.iteritems(): |
| 46 if key in ('url', 'dom_content_loaded_time (ms)', 'load_time (ms)'): | 46 if key in ('url', 'dom_content_loaded_time (ms)', 'load_time (ms)'): |
| 47 continue | 47 continue |
| 48 if not value: | 48 if not value or value == '-': |
| |
Inline review comment on this line:
- nduca (2013/07/23 06:01:27): "i think this script is starting to get to the poin…" [comment truncated in extraction]
- tonyg (2013/07/23 18:41:21): "You are probably right. Would you mind terribly if…" [comment truncated in extraction]
| |
| 49 continue | 49 continue |
| 50 if '_avg' in key: | 50 if '_avg' in key: |
| 51 self.avgs[key].append((float(value), row['url'])) | 51 self.avgs[key].append((float(value), row['url'])) |
| 52 elif '_max' in key: | 52 elif '_max' in key: |
| 53 self.maxes[key].append((float(value), row['url'])) | 53 self.maxes[key].append((float(value), row['url'])) |
| 54 else: | 54 else: |
| 55 self.totals[key].append((float(value), row['url'])) | 55 self.totals[key].append((float(value), row['url'])) |
| 56 self.num_rows_parsed += 1 | 56 self.num_rows_parsed += 1 |
| 57 if options.max_rows and self.num_rows_parsed == int(options.max_rows): | 57 if options.max_rows and self.num_rows_parsed == int(options.max_rows): |
| 58 break | 58 break |
| (...skipping 23 matching lines...) Expand all Loading... | |
| 82 for key, value in sorted(sum_totals.iteritems(), reverse=True, | 82 for key, value in sorted(sum_totals.iteritems(), reverse=True, |
| 83 key=lambda i: i[1]): | 83 key=lambda i: i[1]): |
| 84 output_key = '%30s: ' % key.replace(' (ms)', '') | 84 output_key = '%30s: ' % key.replace(' (ms)', '') |
| 85 output_value = '%10ds ' % (value / 1000) | 85 output_value = '%10ds ' % (value / 1000) |
| 86 output_percent = '%.1f%%' % (100 * value / total_time) | 86 output_percent = '%.1f%%' % (100 * value / total_time) |
| 87 print output_key, output_value, output_percent | 87 print output_key, output_value, output_percent |
| 88 | 88 |
| 89 if not self.num_slowest_urls: | 89 if not self.num_slowest_urls: |
| 90 return | 90 return |
| 91 | 91 |
| 92 for key, values in self.totals.iteritems(): | 92 for key, values in sorted(self.totals.iteritems(), reverse=True, |
| 93 key=lambda i: sum_totals[i[0]]): | |
| 93 print | 94 print |
| 94 print 'Top %d slowest %s:' % (self.num_slowest_urls, | 95 print 'Top %d slowest %s:' % (self.num_slowest_urls, |
| 95 key.replace(' (ms)', '')) | 96 key.replace(' (ms)', '')) |
| 96 slowest = heapq.nlargest(self.num_slowest_urls, values) | 97 slowest = heapq.nlargest(self.num_slowest_urls, values) |
| 97 for value, url in slowest: | 98 for value, url in slowest: |
| 98 print '\t', '%dms\t' % value, url, '(#%s)' % self._GetRank(url) | 99 print '\t', '%dms\t' % value, url, '(#%s)' % self._GetRank(url) |
| 99 | 100 |
| 100 def main(argv): | 101 def main(argv): |
| 101 prog_desc = 'Parses CSV output from the loading_measurement' | 102 prog_desc = 'Parses CSV output from the loading_measurement' |
| 102 parser = optparse.OptionParser(usage=('%prog [options]' + '\n\n' + prog_desc)) | 103 parser = optparse.OptionParser(usage=('%prog [options]' + '\n\n' + prog_desc)) |
| (...skipping 13 matching lines...) Expand all Loading... | |
| 116 print 'Must pass --rank-csv-file with --rank-limit' | 117 print 'Must pass --rank-csv-file with --rank-limit' |
| 117 return 1 | 118 return 1 |
| 118 | 119 |
| 119 LoadingMeasurementAnalyzer(args[0], options).PrintSummary() | 120 LoadingMeasurementAnalyzer(args[0], options).PrintSummary() |
| 120 | 121 |
| 121 return 0 | 122 return 0 |
| 122 | 123 |
| 123 | 124 |
| 124 if __name__ == '__main__': | 125 if __name__ == '__main__': |
| 125 sys.exit(main(sys.argv)) | 126 sys.exit(main(sys.argv)) |
| OLD | NEW |