Index: tools/telemetry/telemetry/page/buildbot_page_measurement_results.py
diff --git a/tools/telemetry/telemetry/page/buildbot_page_measurement_results.py b/tools/telemetry/telemetry/page/buildbot_page_measurement_results.py
index 73a58aa732cd7d19a63e52be220fe35b6abbd9cf..53da5cb2054327c34e736168692f19812083afd7 100644
--- a/tools/telemetry/telemetry/page/buildbot_page_measurement_results.py
+++ b/tools/telemetry/telemetry/page/buildbot_page_measurement_results.py
@@ -6,6 +6,7 @@ from collections import defaultdict
 from itertools import chain
 
 from telemetry.page import page_measurement_results
+from telemetry.page import page_measurement_value
 from telemetry.page import perf_tests_helper
 
 class BuildbotPageMeasurementResults(
@@ -69,11 +70,14 @@ class BuildbotPageMeasurementResults(
         by_name_data_type = 'unimportant-histogram'
       else:
         by_name_data_type = 'unimportant'
-      if '.' in measurement and 'histogram' not in data_type:
-        measurement, trace = measurement.split('.', 1)
-        trace += self._trace_tag
-      else:
-        trace = measurement + self._trace_tag
+
+      trace = ''
+      if 'histogram' not in data_type:
+        measurement, trace = \
+            page_measurement_value.ChartTraceFromMeasurement(measurement)
+      if not measurement:
+        measurement = trace
+      trace += self._trace_tag
 
       # Print individual _by_name results if there's more than 1 successful
       # page, or if there's exactly 1 successful page but a failure exists.
@@ -106,6 +110,30 @@ class BuildbotPageMeasurementResults(
             values = list(chain.from_iterable(values))
           self._PrintPerfResult(measurement, trace, values, units, data_type)
 
+    # Output all the by_name comparisons.  This is safe regardless of
+    # whether the _by_name results above were output, as the final
+    # display needs to detect and ignore comparisons that refer to
+    # non-existent base statistics.
+    for stat_name, stat_info in self._comparison_stats.iteritems():
+      (result_stat_measurement, result_stat_trace) = \
+          page_measurement_value.ChartTraceFromMeasurement(stat_name)
+      if not result_stat_measurement:
+        result_stat_measurement = result_stat_trace
+      (ref_stat_measurement, ref_stat_trace) = \
+          page_measurement_value.ChartTraceFromMeasurement(
+              stat_info['reference'])
+      if not ref_stat_measurement:
+        ref_stat_measurement = ref_stat_trace
+      (comparison_stat_measurement, comparison_stat_trace) = \
+          page_measurement_value.ChartTraceFromMeasurement(
+              stat_info['comparison'])
+      if not comparison_stat_measurement:
+        comparison_stat_measurement = comparison_stat_trace
+      for name in unique_pages:
+        self.AddComparison(name, ref_stat_measurement + '_by_url',
+                           name, comparison_stat_measurement + '_by_url',
+                           name, result_stat_measurement + '_by_url')
+
     # If there were no failed pages, output the overall results (results not
     # associated with a page).
     if not (self.errors or self.failures):
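Note: the new chart/trace splitting above delegates to
page_measurement_value.ChartTraceFromMeasurement, whose body is not part of
this patch. Judging only from the inline split logic it replaces and from
the "if not measurement" fallback at both call sites, it plausibly behaves
like the sketch below; this is an inference, not the actual telemetry
implementation.

    # Hypothetical sketch of ChartTraceFromMeasurement, inferred from the
    # measurement.split('.', 1) logic this patch removes. The real function
    # lives in telemetry/page/page_measurement_value.py and may differ.
    def ChartTraceFromMeasurement(measurement):
      # 'chart.trace' splits at the first dot into ('chart', 'trace').
      if '.' in measurement:
        chart, trace = measurement.split('.', 1)
        return chart, trace
      # With no dot there is no chart component; returning an empty chart
      # name is what makes the callers fall back to using the trace as the
      # measurement ('if not measurement: measurement = trace').
      return '', measurement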
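Note: the comparison loop assumes each entry of self._comparison_stats maps
a result statistic name to a dict with 'reference' and 'comparison' keys,
each holding a stat name in the same 'chart.trace' form. That shape is
inferred purely from how the loop indexes the dict; the stat names below
are invented for illustration.

    # Hypothetical entry (assumed structure, invented names):
    self._comparison_stats = {
        'memory_delta.total': {          # the result statistic
            'reference': 'memory_before.total',
            'comparison': 'memory_after.total',
        },
    }
    # For a successful page, say name = 'http://example.com', the loop
    # would then register one comparison over the '_by_url' variants:
    self.AddComparison('http://example.com', 'memory_before_by_url',
                       'http://example.com', 'memory_after_by_url',
                       'http://example.com', 'memory_delta_by_url')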