Index: tools/telemetry/telemetry/results/chart_json_output_formatter.py
diff --git a/tools/telemetry/telemetry/results/chart_json_output_formatter.py b/tools/telemetry/telemetry/results/chart_json_output_formatter.py
index 6ba8f21c344703fc464935fe4b782e4c256698c3..db57b5d09adb5c3767f8e511fac14139c27f8f20 100644
--- a/tools/telemetry/telemetry/results/chart_json_output_formatter.py
+++ b/tools/telemetry/telemetry/results/chart_json_output_formatter.py
@@ -3,14 +3,18 @@
 # found in the LICENSE file.
 import collections
+import datetime
 import itertools
 import json
+import os
+from telemetry.results import json_output_formatter
 from telemetry.results import output_formatter
+from telemetry.util import cloud_storage
 from telemetry.value import summary as summary_module
 def _ResultsAsChartDict(benchmark_metadata, page_specific_values,
-                        summary_values):
+    summary_values):
   """Produces a dict for serialization to Chart JSON format from raw values.
   Chart JSON is a transformation of the basic Telemetry JSON format that
@@ -28,46 +32,136 @@ def _ResultsAsChartDict(benchmark_metadata, page_specific_values,
     benchmark_metadata: a benchmark.BenchmarkMetadata object
   Returns:
-    A Chart JSON dict corresponding to the given data.
+    A Chart JSON dict corresponding to the given data, and a dict mapping
+    page IDs to link names for values that have an associated file.
   """
   summary = summary_module.Summary(page_specific_values)
   values = itertools.chain(
       summary.interleaved_computed_per_page_values_and_summaries,
       summary_values)
   charts = collections.defaultdict(dict)
+  file_names = dict()
+  file_list = []
+
+  benchmark_name = benchmark_metadata.name
   for value in values:
     if value.page:
-      chart_name, trace_name = (value.GetChartAndTraceNameForPerPageResult())
+      chart_name, trace_name = (
+          value.GetChartAndTraceNameForPerPageResult())
+      if value.GetAssociatedFileHandle():
+        name = chart_name + '_' + trace_name
+        page_id = value.page.id
+        file_names[page_id] = name
+        file_list.append({
+            'name': name,
+            'page_id': page_id,
+        })
     else:
       chart_name, trace_name = (
           value.GetChartAndTraceNameForComputedSummaryResult(None))
       if chart_name == trace_name:
         trace_name = 'summary'
+      if value.GetAssociatedFileHandle():
+        name = chart_name + '_' + trace_name
+        file_list.append({
+            'name': name,
+            'chart': chart_name,
+        })
-    # This intentionally overwrites the trace if it already exists because this
-    # is expected of output from the buildbots currently.
-    # See: crbug.com/413393
-    charts[chart_name][trace_name] = value.AsDict()
+    # Values backed by a file handle are listed under 'files' instead, so each
+    # (chart, trace) pair should only be produced here once.
+    # See: crbug.com/413393
+    assert trace_name not in charts[chart_name]
+
+    if not value.GetAssociatedFileHandle():
+      charts[chart_name][trace_name] = value.AsDict()
   result_dict = {
     'format_version': '0.1',
-    'benchmark_name': benchmark_metadata.name,
-    'charts': charts
+    'benchmark_name': benchmark_name,
+    'charts': charts,
+    'files': file_list,
   }
-  return result_dict
+  return result_dict, file_names
+
+
+def _GenerateAndUploadFileIndex(
+    filenames, files, dirname, benchmark_name, should_upload):
+  """Writes an HTML index of the trace files and optionally uploads it.
+
+  Returns the Cloud Storage URL of the uploaded index, or the local path of
+  the generated file when should_upload is False.
+  """
+  html_file = open('trace_files/files.html', 'w')
+
+  html_file.write('<html><head><title>Traces for %s</title></head><body>' % (
+      benchmark_name))
+  html_file.write('<ul>')
+  for f in files:
+    html_file.write(
+        '<li><a href="//storage.googleapis.com/chromium-telemetry/'
+        '%s">%s</a></li>' % (files[f], filenames[f]))
+  html_file.write('</ul></body></html>')
+
+  html_file.flush()
+  os.fsync(html_file.fileno())
+  html_file.close()
+  if should_upload:
+    name = dirname + '/traces.html'
+    url = 'http://storage.googleapis.com/chromium-telemetry/%s' % name
+    cloud_storage.Insert(cloud_storage.PUBLIC_BUCKET, name, html_file.name)
+    print 'View traces at ' + url
+    return url
+  return html_file.name
 # TODO(eakuefner): Transition this to translate Telemetry JSON.
 class ChartJsonOutputFormatter(output_formatter.OutputFormatter):
-  def __init__(self, output_stream, benchmark_metadata):
+  def __init__(self, output_stream, benchmark_metadata, upload_results=False):
     super(ChartJsonOutputFormatter, self).__init__(output_stream)
     self._benchmark_metadata = benchmark_metadata
+    self._upload_results = upload_results
   def Format(self, page_test_results):
-    json.dump(_ResultsAsChartDict(
+    if self._upload_results:
+      dirname = os.path.dirname(os.path.abspath(self._output_stream.name))
+    else:
+      dirname = os.curdir
+    files = json_output_formatter.OutputFiles(page_test_results, dirname,
+                                              '.html')
+
+    uploaded_files = dict()
+    if self._upload_results and files:
+      print 'Uploading files to Cloud Storage...'
+      remote_dirname = 'trace_uploads/%s-%s' % (
+          self._benchmark_metadata.name,
+          datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
+      for f_id in files:
+        name = remote_dirname + '/' + str(f_id)
+        uploaded_files[f_id] = name
+        cloud_storage.Insert(cloud_storage.PUBLIC_BUCKET, name, files[f_id])
+    else:
+      remote_dirname = None
+
+    if self._upload_results:
+      file_ids_to_locations = uploaded_files
+    else:
+      file_ids_to_locations = files
+
+    output_dict, filenames = _ResultsAsChartDict(
         self._benchmark_metadata,
         page_test_results.all_page_specific_values,
-        page_test_results.all_summary_values),
-      self.output_stream)
+        page_test_results.all_summary_values)
+
+    index = _GenerateAndUploadFileIndex(filenames, file_ids_to_locations,
+                                        remote_dirname,
+                                        self._benchmark_metadata.name,
+                                        remote_dirname is not None)
+    output_dict['file_index'] = index
+
+    for f in output_dict['files']:
+      page_id = f.get('page_id')
+      if page_id is not None:
+        f['url'] = file_ids_to_locations[page_id]
+      else:
+        f['url'] = index
+
+    json.dump(output_dict, self.output_stream)
     self.output_stream.write('\n')
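
Note (not part of the patch above): a rough sketch of the Chart JSON payload the formatter emits after this change, for illustration only. The top-level keys ('format_version', 'benchmark_name', 'charts', 'files', 'file_index') come from the code; the benchmark name, chart and trace names, IDs, and paths below are hypothetical.

# Illustrative only -- hypothetical values, not output from a real run.
import json

example_chart_json = {
    'format_version': '0.1',
    'benchmark_name': 'example_benchmark',  # benchmark_metadata.name
    'charts': {
        'timeline_duration': {
            'summary': {},  # value.AsDict() for the summary value goes here
        },
    },
    # One entry per value with an associated file handle; Format() fills in
    # 'url' with either the Cloud Storage object path or a local path.
    'files': [
        {'name': 'timeline_duration_http_example.com', 'page_id': 1,
         'url': 'trace_uploads/example_benchmark-2014-01-01_00-00-00/1'},
    ],
    # Location of the generated files.html index (URL when uploaded).
    'file_index': 'http://storage.googleapis.com/chromium-telemetry/'
                  'trace_uploads/example_benchmark-2014-01-01_00-00-00/traces.html',
}
print json.dumps(example_chart_json, indent=2)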