| Index: tools/telemetry/telemetry/page/buildbot_page_measurement_results.py
|
| diff --git a/tools/telemetry/telemetry/page/buildbot_page_measurement_results.py b/tools/telemetry/telemetry/page/buildbot_page_measurement_results.py
|
| index 73a58aa732cd7d19a63e52be220fe35b6abbd9cf..fdd808181de79100e27beb53062fc59149cc742a 100644
|
| --- a/tools/telemetry/telemetry/page/buildbot_page_measurement_results.py
|
| +++ b/tools/telemetry/telemetry/page/buildbot_page_measurement_results.py
|
| @@ -3,10 +3,12 @@
|
| # found in the LICENSE file.
|
|
|
| from collections import defaultdict
|
| -from itertools import chain
|
|
|
| +from telemetry import value as value_module
|
| from telemetry.page import page_measurement_results
|
| from telemetry.page import perf_tests_helper
|
| +from telemetry.value import merge_values
|
| +
|
|
|
| class BuildbotPageMeasurementResults(
|
| page_measurement_results.PageMeasurementResults):
|
| @@ -14,10 +16,10 @@ class BuildbotPageMeasurementResults(
|
| super(BuildbotPageMeasurementResults, self).__init__()
|
| self._trace_tag = trace_tag
|
|
|
| - def _PrintPerfResult(self, measurement, trace, values, units,
|
| + def _PrintPerfResult(self, measurement, trace, v, units,
|
| result_type='default'):
|
| perf_tests_helper.PrintPerfResult(
|
| - measurement, trace, values, units, result_type)
|
| + measurement, trace, v, units, result_type)
|
|
|
| def PrintSummary(self):
|
| """Print summary data in a format expected by buildbot for perf dashboards.
|
| @@ -25,100 +27,145 @@ class BuildbotPageMeasurementResults(
|
| If any failed pages exist, only output individual page results for
|
| non-failing pages, and do not output any average data.
|
| """
|
| - if self.errors or self.failures:
|
| - success_page_results = [r for r in self._page_results
|
| - if r.page.url not in
|
| - zip(*self.errors + self.failures)[0]]
|
| - else:
|
| - success_page_results = self._page_results
|
| -
|
| # Print out the list of unique pages.
|
| - # Use a set and a list to efficiently create an order preserving list of
|
| - # unique page display_names.
|
| - unique_pages = []
|
| - unique_pages_set = set()
|
| - for page_values in success_page_results:
|
| - name = page_values.page.display_name
|
| - if name in unique_pages_set:
|
| - continue
|
| - unique_pages.append(name)
|
| - unique_pages_set.add(name)
|
| - perf_tests_helper.PrintPages(unique_pages)
|
| -
|
| - # Build the results summary.
|
| - results_summary = defaultdict(list)
|
| - for measurement_name in \
|
| - self._all_measurements_that_have_been_seen.iterkeys():
|
| - for page_values in success_page_results:
|
| - value = page_values.FindValueByMeasurementName(measurement_name)
|
| - if not value:
|
| - continue
|
| - measurement_units_type = (measurement_name,
|
| - value.units,
|
| - value.data_type)
|
| - value_and_display_name = (value.value, page_values.page.display_name)
|
| - results_summary[measurement_units_type].append(value_and_display_name)
|
| -
|
| - # Output the results summary sorted by measurement name, then units, then
|
| - # data type.
|
| - for measurement_units_type, value_and_display_name_list in sorted(
|
| - results_summary.iteritems()):
|
| - measurement, units, data_type = measurement_units_type
|
| -
|
| - if 'histogram' in data_type:
|
| - by_name_data_type = 'unimportant-histogram'
|
| - else:
|
| - by_name_data_type = 'unimportant'
|
| - if '.' in measurement and 'histogram' not in data_type:
|
| - measurement, trace = measurement.split('.', 1)
|
| - trace += self._trace_tag
|
| + perf_tests_helper.PrintPages(
|
| + [page.display_name for page in self.pages_that_succeeded])
|
| + self._PrintPerPageResults()
|
| + self._PrintOverallResults()
|
| +
|
| + @property
|
| + def had_errors_or_failures(self):
|
| + return self.errors or self.failures
|
| +
|
| + def _PrintPerPageResults(self):
|
| + all_successful_page_values = (
|
| + self.GetAllPageSpecificValuesForSuccessfulPages())
|
| +
|
| + had_exactly_one_successful_page = len(
|
| + set([v.page for v in all_successful_page_values])) == 1
|
| +
|
| + # By here, due to page repeat options, all_successful_page_values
|
| + # contains values of the same name not only from multiple pages, but also
|
| + # from the same page. So even if, for instance, only one page ran, it may
|
| + # have run twice, producing two 'x' values.
|
| + #
|
| + # So, get rid of the repeated pages by merging.
|
| + merged_page_values = merge_values.MergeLikeValuesFromSamePage(
|
| + all_successful_page_values)
|
| +
|
| + # Now we have a bunch of values, but there is only one value_name per page.
|
| + # Suppose page1 and page2 ran, producing values x and y. We want to print
|
| + # x_by_url for page1
|
| + # x_by_url for page2
|
| + # x for page1, page2 combined
|
| + #
|
| + # y_by_url for page1
|
| + # y_by_url for page2
|
| + # y for page1, page2 combined
|
| + #
|
| + # We already have the x_by_url values in the values array. But, we will need
|
| + # them indexable by the value name.
|
| + #
|
| + # The following dict maps value_name -> list of values that have
|
| + # that name.
|
| + per_page_values_by_value_name = defaultdict(list)
|
| + for value in merged_page_values:
|
| + per_page_values_by_value_name[value.name].append(value)
|
| +
|
| + # We already have the x_by_url values in the values array. But, we also need
|
| + # the values merged across the pages. And, we will need them indexed by
|
| + # value name so that we can find them when printing out value names in
|
| + # alphabetical order.
|
| + merged_pages_value_by_value_name = {}
|
| + for value in merge_values.MergeLikeValuesFromDifferentPages(
|
| + all_successful_page_values):
|
| + assert value.name not in merged_pages_value_by_value_name
|
| + merged_pages_value_by_value_name[value.name] = value
|
| +
|
| + # sorted_value_names will govern the order we start printing values.
|
| + value_names = set([v.name for v in merged_page_values])
|
| + sorted_value_names = sorted(value_names)
|
| +
|
| + # Time to walk through the values by name, printing first the by_url values
|
| + # and then the merged-pages value.
|
| + for value_name in sorted_value_names:
|
| + per_page_values = per_page_values_by_value_name.get(value_name, [])
|
| +
|
| + # Sort the values by their url
|
| + sorted_per_page_values = list(per_page_values)
|
| + sorted_per_page_values.sort(
|
| + key=lambda per_page_values: per_page_values.page.display_name)
|
| +
|
| + # Output the _by_url results.
|
| + for per_page_value in sorted_per_page_values:
|
| + self._PrintPerPageValue(per_page_value, had_exactly_one_successful_page)
|
| +
|
| + # Output the combined values.
|
| + merged_pages_value = merged_pages_value_by_value_name.get(value_name,
|
| + None)
|
| + if merged_pages_value:
|
| + self._PrintMergedPagesValue(merged_pages_value)
|
| +
|
| + def _PrintPerPageValue(self, value, had_exactly_one_successful_page):
|
| + # We don't print per-page-values when there is a trace tag.
|
| + if self._trace_tag:
|
| + return
|
| +
|
| + # If there were any page errors, we typically will print nothing.
|
| + #
|
| + # Note: this branch is structured less-densely to improve legibility.
|
| + if self.had_errors_or_failures:
|
| + if had_exactly_one_successful_page:
|
| + pass
|
| else:
|
| - trace = measurement + self._trace_tag
|
| -
|
| - # Print individual _by_name results if there's more than 1 successful
|
| - # page, or if there's exactly 1 successful page but a failure exists.
|
| - if not self._trace_tag and (len(value_and_display_name_list) > 1 or
|
| - ((self.errors or self.failures) and
|
| - len(value_and_display_name_list) == 1)):
|
| - name_value_map = defaultdict(list)
|
| - for value, name in value_and_display_name_list:
|
| - if 'histogram' in data_type and name_value_map[name]:
|
| - # TODO(tonyg/marja): The histogram processing code only accepts one
|
| - # histogram, so we only report the first histogram. Once histograms
|
| - # support aggregating multiple values, this can be removed.
|
| - continue
|
| - name_value_map[name].append(value)
|
| - for name in unique_pages:
|
| - values = perf_tests_helper.FlattenList(name_value_map[name])
|
| - if not len(values):
|
| - continue
|
| - self._PrintPerfResult(measurement + '_by_url', name,
|
| - values, units, by_name_data_type)
|
| -
|
| - # If there were no page failures, print the average data.
|
| - # For histograms, we don't print the average data, only the _by_name,
|
| - # unless there is only 1 page in which case the _by_names are omitted.
|
| - if not (self.errors or self.failures):
|
| - if ('histogram' not in data_type or
|
| - len(value_and_display_name_list) == 1):
|
| - values = [i[0] for i in value_and_display_name_list]
|
| - if isinstance(values[0], list):
|
| - values = list(chain.from_iterable(values))
|
| - self._PrintPerfResult(measurement, trace, values, units, data_type)
|
| + return
|
|
|
| + # Actually print the result.
|
| + buildbot_value = value.GetBuildbotValue()
|
| + buildbot_data_type = value.GetBuildbotDataType(
|
| + output_context=value_module.PER_PAGE_RESULT_OUTPUT_CONTEXT)
|
| + buildbot_measurement_name, buildbot_trace_name = (
|
| + value.GetBuildbotMeasurementAndTraceNameForPerPageResult())
|
| + self._PrintPerfResult(buildbot_measurement_name,
|
| + buildbot_trace_name,
|
| + buildbot_value, value.units, buildbot_data_type)
|
| +
|
| + def _PrintMergedPagesValue(self, value):
|
| + # If there were any page errors, we typically will print nothing.
|
| + #
|
| + # Note: this branch is structured less-densely to improve legibility.
|
| + if self.had_errors_or_failures:
|
| + return
|
| +
|
| + buildbot_value = value.GetBuildbotValue()
|
| + buildbot_data_type = value.GetBuildbotDataType(
|
| + output_context=value_module.MERGED_PAGES_RESULT_OUTPUT_CONTEXT)
|
| + buildbot_measurement_name, buildbot_trace_name = (
|
| + value.GetBuildbotMeasurementAndTraceNameForMergedPagesResult(
|
| + self._trace_tag))
|
| +
|
| + self._PrintPerfResult(buildbot_measurement_name,
|
| + buildbot_trace_name,
|
| + buildbot_value, value.units, buildbot_data_type)
|
| +
|
| + def _PrintOverallResults(self):
|
| # If there were no failed pages, output the overall results (results not
|
| # associated with a page).
|
| - if not (self.errors or self.failures):
|
| - for value in self._overall_results:
|
| - values = value.value
|
| - if not isinstance(values, list):
|
| - values = [values]
|
| - measurement_name = value.chart_name
|
| - if not measurement_name:
|
| - measurement_name = value.trace_name
|
| - self._PrintPerfResult(measurement_name,
|
| - value.trace_name + self._trace_tag,
|
| - values, value.units, value.data_type)
|
| + if not self.had_errors_or_failures:
|
| + for value in self._all_summary_values:
|
| + buildbot_value = value.GetBuildbotValue()
|
| + buildbot_data_type = value.GetBuildbotDataType(
|
| + output_context=value_module.SUMMARY_RESULT_OUTPUT_CONTEXT)
|
| + buildbot_measurement_name, buildbot_trace_name = (
|
| + value.GetBuildbotMeasurementAndTraceNameForMergedPagesResult(
|
| + self._trace_tag))
|
| + self._PrintPerfResult(
|
| + buildbot_measurement_name,
|
| + buildbot_trace_name,
|
| + buildbot_value,
|
| + value.units,
|
| + buildbot_data_type)
|
| +
|
|
|
| # Print the number of failed and errored pages.
|
| self._PrintPerfResult('telemetry_page_measurement_results', 'num_failed',
|
|
|