OLD | NEW |
---|---|
1 # Copyright 2014 The Chromium Authors. All rights reserved. | 1 # Copyright 2014 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 import optparse | 5 import optparse |
6 import os | 6 import os |
7 import sys | 7 import sys |
8 | 8 |
9 from telemetry.core import util | 9 from telemetry.core import util |
10 from telemetry.results import buildbot_output_formatter | 10 from telemetry.results import buildbot_output_formatter |
11 from telemetry.results import chart_json_output_formatter | 11 from telemetry.results import chart_json_output_formatter |
12 from telemetry.results import csv_output_formatter | 12 from telemetry.results import csv_output_formatter |
13 from telemetry.results import gtest_progress_reporter | 13 from telemetry.results import gtest_progress_reporter |
14 from telemetry.results import html_output_formatter | 14 from telemetry.results import html_output_formatter |
15 from telemetry.results import json_output_formatter | 15 from telemetry.results import json_output_formatter |
16 from telemetry.results import page_test_results | 16 from telemetry.results import page_test_results |
17 from telemetry.results import progress_reporter | 17 from telemetry.results import progress_reporter |
18 | 18 |
19 # Allowed output formats. The default is the first item in the list. | 19 # Allowed output formats. The default is the first item in the list. |
20 _OUTPUT_FORMAT_CHOICES = ('html', 'buildbot', 'block', 'csv', 'gtest', 'json', | 20 _OUTPUT_FORMAT_CHOICES = ('html', 'buildbot', 'block', 'csv', 'gtest', 'json', |
21 'chartjson', 'none') | 21 'chartjson', 'none') |
22 | 22 |
23 | 23 |
24 def AddResultsOptions(parser): | 24 def AddResultsOptions(parser): |
25 group = optparse.OptionGroup(parser, 'Results options') | 25 group = optparse.OptionGroup(parser, 'Results options') |
26 group.add_option('--chartjson', action='store_true', | 26 group.add_option('--chartjson', action='store_true', |
27 help='Output Chart JSON. Ignores --output-format.') | 27 help='Output Chart JSON. Ignores --output-format.') |
28 group.add_option('--output-format', | 28 group.add_option('--output-format', action='append', dest='output_formats', |
29 default=_OUTPUT_FORMAT_CHOICES[0], | 29 default=[_OUTPUT_FORMAT_CHOICES[0]], |
30 choices=_OUTPUT_FORMAT_CHOICES, | 30 choices=_OUTPUT_FORMAT_CHOICES, |
31 help='Output format. Defaults to "%%default". ' | 31 help='Output format. Defaults to "%%default". ' |
32 'Can be %s.' % ', '.join(_OUTPUT_FORMAT_CHOICES)) | 32 'Can be %s.' % ', '.join(_OUTPUT_FORMAT_CHOICES)) |
33 group.add_option('-o', '--output', | 33 group.add_option('-o', '--output', |
34 dest='output_file', | 34 dest='output_file', |
35 default=None, | |
35 help='Redirects output to a file. Defaults to stdout.') | 36 help='Redirects output to a file. Defaults to stdout.') |
36 group.add_option('--output-trace-tag', | 37 group.add_option('--output-trace-tag', |
37 default='', | 38 default='', |
38 help='Append a tag to the key of each result trace.') | 39 help='Append a tag to the key of each result trace.') |
39 group.add_option('--reset-results', action='store_true', | 40 group.add_option('--reset-results', action='store_true', |
40 help='Delete all stored results.') | 41 help='Delete all stored results.') |
41 group.add_option('--upload-results', action='store_true', | 42 group.add_option('--upload-results', action='store_true', |
42 help='Upload the results to cloud storage.') | 43 help='Upload the results to cloud storage.') |
43 group.add_option('--results-label', | 44 group.add_option('--results-label', |
44 default=None, | 45 default=None, |
45 help='Optional label to use for the results of a run .') | 46 help='Optional label to use for the results of a run .') |
46 group.add_option('--suppress_gtest_report', | 47 group.add_option('--suppress_gtest_report', |
47 default=False, | 48 default=False, |
48 help='Whether to suppress GTest progress report.') | 49 help='Whether to suppress GTest progress report.') |
49 parser.add_option_group(group) | 50 parser.add_option_group(group) |
50 | 51 |
51 | 52 |
53 def _GetOutputStream(output_format, output_file): | |
54 assert output_format in _OUTPUT_FORMAT_CHOICES, 'Must specify a valid format.' | |
55 assert output_format not in ('gtest', 'none'), ( | |
56 'Cannot set stream for \'gtest\' or \'none\' output formats.') | |
57 | |
58 if output_file is None: | |
59 if output_format != 'html' and output_format != 'json': | |
60 return sys.stdout | |
61 output_file = os.path.join(util.GetBaseDir(), 'results.' + output_format) | |
62 | |
63 output_file = os.path.expanduser(output_file) | |
64 open(output_file, 'a').close() # Create file if it doesn't exist. | |
65 return open(output_file, 'r+') | |
66 | |
67 | |
68 def _GetProgressReporter(output_skipped_tests_summary, suppress_gtest_report): | |
69 if suppress_gtest_report: | |
70 return progress_reporter.ProgressReporter() | |
71 | |
72 return gtest_progress_reporter.GTestProgressReporter( | |
73 sys.stdout, output_skipped_tests_summary=output_skipped_tests_summary) | |
74 | |
75 | |
52 def CreateResults(benchmark_metadata, options): | 76 def CreateResults(benchmark_metadata, options): |
53 """ | 77 """ |
54 Args: | 78 Args: |
55 options: Contains the options specified in AddResultsOptions. | 79 options: Contains the options specified in AddResultsOptions. |
56 """ | 80 """ |
57 # TODO(chrishenry): This logic prevents us from having multiple | 81 # TODO(chrishenry): It doesn't make sense to have a single output_file flag |
58 # OutputFormatters. We should have an output_file per OutputFormatter. | 82 # with multiple output formatters. We should explore other possible options: |
59 # Maybe we should have --output-dir instead of --output-file? | 83 # - Have an output_file per output formatter |
60 if options.output_format == 'html' and not options.output_file: | 84 # - Have --output-dir instead of --output-file |
61 options.output_file = os.path.join(util.GetBaseDir(), 'results.html') | 85 if len(options.output_formats) != 1 and options.output_file: |
62 elif options.output_format == 'json' and not options.output_file: | 86 raise Exception('Cannot specify output_file flag with multiple output ' |
63 options.output_file = os.path.join(util.GetBaseDir(), 'results.json') | 87 'formats.') |
64 | |
65 if hasattr(options, 'output_file') and options.output_file: | |
66 output_file = os.path.expanduser(options.output_file) | |
67 open(output_file, 'a').close() # Create file if it doesn't exist. | |
68 output_stream = open(output_file, 'r+') | |
69 else: | |
70 output_stream = sys.stdout | |
71 if not hasattr(options, 'output_format'): | |
72 options.output_format = _OUTPUT_FORMAT_CHOICES[0] | |
73 if not hasattr(options, 'output_trace_tag'): | |
74 options.output_trace_tag = '' | |
75 | 88 |
76 output_formatters = [] | 89 output_formatters = [] |
77 output_skipped_tests_summary = True | 90 for output_format in options.output_formats: |
78 reporter = None | 91 output_stream = _GetOutputStream(output_format, options.output_file) |
79 if options.output_format == 'none' or options.chartjson: | 92 if output_format == 'none' or output_format == "gtest" or options.chartjson: |
nednguyen
2014/09/16 19:18:07
So the or condition on "options.chartjson" is dead code, right?
ariblue
2014/09/16 19:29:16
I believe so; I don't see it used anywhere in perf/ or elsewhere.
80 pass | 93 continue |
81 elif options.output_format == 'csv': | 94 elif output_format == 'csv': |
82 output_formatters.append(csv_output_formatter.CsvOutputFormatter( | 95 output_formatters.append(csv_output_formatter.CsvOutputFormatter( |
83 output_stream)) | 96 output_stream)) |
84 elif options.output_format == 'buildbot': | 97 elif output_format == 'buildbot': |
85 output_formatters.append(buildbot_output_formatter.BuildbotOutputFormatter( | 98 output_formatters.append( |
86 output_stream, trace_tag=options.output_trace_tag)) | 99 buildbot_output_formatter.BuildbotOutputFormatter( |
87 elif options.output_format == 'gtest': | 100 output_stream, trace_tag=options.output_trace_tag)) |
88 # TODO(chrishenry): This is here to not change the output of | 101 elif output_format == 'html': |
89 # gtest. Let's try enabling skipped tests summary for gtest test | 102 # TODO(chrishenry): We show buildbot output so that users can grep |
90 # results too (in a separate patch), and see if we break anything. | 103 # through the results easily without needing to open the html |
91 output_skipped_tests_summary = False | 104 # file. Another option for this is to output the results directly |
92 elif options.output_format == 'html': | 105 # in gtest-style results (via some sort of progress reporter), |
93 # TODO(chrishenry): We show buildbot output so that users can grep | 106 # as we plan to enable gtest-style output for all output formatters. |
94 # through the results easily without needing to open the html | 107 output_formatters.append( |
95 # file. Another option for this is to output the results directly | 108 buildbot_output_formatter.BuildbotOutputFormatter( |
96 # in gtest-style results (via some sort of progress reporter), | 109 sys.stdout, trace_tag=options.output_trace_tag)) |
97 # as we plan to enable gtest-style output for all output formatters. | 110 output_formatters.append(html_output_formatter.HtmlOutputFormatter( |
98 output_formatters.append(buildbot_output_formatter.BuildbotOutputFormatter( | 111 output_stream, benchmark_metadata, options.reset_results, |
99 sys.stdout, trace_tag=options.output_trace_tag)) | 112 options.upload_results, options.browser_type, |
100 output_formatters.append(html_output_formatter.HtmlOutputFormatter( | 113 options.results_label, trace_tag=options.output_trace_tag)) |
101 output_stream, benchmark_metadata, options.reset_results, | 114 elif output_format == 'json': |
102 options.upload_results, options.browser_type, | 115 output_formatters.append(json_output_formatter.JsonOutputFormatter( |
103 options.results_label, trace_tag=options.output_trace_tag)) | 116 output_stream, benchmark_metadata)) |
104 elif options.output_format == 'json': | 117 elif output_format == 'chartjson': |
105 output_formatters.append( | 118 output_formatters.append( |
106 json_output_formatter.JsonOutputFormatter(output_stream, | 119 chart_json_output_formatter.ChartJsonOutputFormatter( |
107 benchmark_metadata)) | 120 output_stream, benchmark_metadata)) |
108 elif options.output_format == 'chartjson': | 121 else: |
109 output_formatters.append( | 122 # Should never be reached. The parser enforces the choices. |
110 chart_json_output_formatter.ChartJsonOutputFormatter( | 123 raise Exception('Invalid --output-format "%s". Valid choices are: %s' |
111 output_stream, | 124 % (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES))) |
112 benchmark_metadata)) | |
113 else: | |
114 # Should never be reached. The parser enforces the choices. | |
115 raise Exception('Invalid --output-format "%s". Valid choices are: %s' | |
116 % (options.output_format, | |
117 ', '.join(_OUTPUT_FORMAT_CHOICES))) | |
118 | 125 |
119 if options.suppress_gtest_report: | 126 # TODO(chrishenry): This is here to not change the output of |
120 reporter = progress_reporter.ProgressReporter() | 127 # gtest. Let's try enabling skipped tests summary for gtest test |
121 else: | 128 # results too (in a separate patch), and see if we break anything. |
122 reporter = gtest_progress_reporter.GTestProgressReporter( | 129 output_skipped_tests_summary = 'gtest' in options.output_formats |
123 sys.stdout, output_skipped_tests_summary=output_skipped_tests_summary) | 130 |
131 reporter = _GetProgressReporter(output_skipped_tests_summary, | |
132 options.suppress_gtest_report) | |
124 return page_test_results.PageTestResults( | 133 return page_test_results.PageTestResults( |
125 output_formatters=output_formatters, progress_reporter=reporter) | 134 output_formatters=output_formatters, progress_reporter=reporter) |
OLD | NEW |