Chromium Code Reviews| Index: tools/perf/measurements/meta_measurement.py |
| diff --git a/tools/perf/measurements/meta_measurement.py b/tools/perf/measurements/meta_measurement.py |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..7a77cffb65918fc96b6a697f95a0966c3e7610b3 |
| --- /dev/null |
| +++ b/tools/perf/measurements/meta_measurement.py |
| @@ -0,0 +1,94 @@ |
| +# Copyright 2014 The Chromium Authors. All rights reserved. |
|
tonyg
2014/02/22 14:55:19
The term "meta" gave me the wrong initial expectat
|
| +# Use of this source code is governed by a BSD-style license that can be |
| +# found in the LICENSE file. |
| + |
| +import re |
| + |
| +from telemetry.page import page_measurement |
| +from metrics import timeline as timeline_module |
| + |
| + |
# Metric-type tag that pages embed in MetricRequest event names
# (e.g. 'MetricRequest.smoothness/scroll') to request smoothness metrics.
SMOOTHNESS_METRIC_TYPE = 'smoothness'
| + |
| + |
def IsMetricRequest(event_name):
  """Returns True when |event_name| denotes a metric request trace event."""
  prefix = 'MetricRequest.'
  return event_name[:len(prefix)] == prefix
| + |
def ParseMetricRequest(event_name):
  """Splits a 'MetricRequest.<type>/<name>' event name into its parts.

  Args:
    event_name: An async trace event name, e.g.
        'MetricRequest.smoothness/scroll'.

  Returns:
    A (metric_type, logical_name) tuple, e.g. ('smoothness', 'scroll').

  Raises:
    ValueError: If |event_name| is not a well-formed metric request name.
  """
  # Raw string: the original pattern used a non-raw '\/' escape, which is
  # invalid in regular strings and unnecessary in regexes.
  m = re.match(r'MetricRequest\.(.+?)/(.+)', event_name)
  if not m:
    # An explicit raise instead of 'assert m': asserts are stripped under -O,
    # which would turn a malformed name into an opaque AttributeError below.
    raise ValueError('Malformed metric request event name: %s' % event_name)
  return (m.group(1), m.group(2))
| + |
| + |
class MetricRequest(object):
  """A single metric request parsed from an async trace event.

  Holds the requested metric type, the page-chosen logical name, and the
  time span (start/end) of the originating event.
  """

  def __init__(self, event):
    metric_type, logical_name = ParseMetricRequest(event.name)
    self.metric_type = metric_type
    self.logical_name = logical_name
    self.start = event.start
    self.end = event.end

  def GetResultNameFor(self, result_name):
    """Returns |result_name| namespaced under this request's logical name."""
    return '%s/%s' % (self.logical_name, result_name)
| + |
class _MetaMetrics(object):
  """Matches metric requests found in a trace with timeline metrics.

  Scans the renderer thread's async slices for MetricRequest events and, for
  each request, instantiates metrics of the requested type and adds their
  results over the requested time range.
  """

  def __init__(self, model, renderer_thread, detailed_mode):
    self._model = model
    self._renderer_thread = renderer_thread
    self._detailed_mode = detailed_mode

  def FindMetricRequests(self):
    """Returns a MetricRequest for each request event on the renderer thread."""
    # TODO(nduca): Add support for page-load metric request.
    return [MetricRequest(event)
            for event in self._renderer_thread.IterAllAsyncSlices()
            if IsMetricRequest(event.name)]

  def CreateMetricsForMetricRequest(self, request):
    """Returns the list of metric objects that satisfy |request|.

    Raises:
      Exception: If the request names an unrecognized metric type.
    """
    if request.metric_type == SMOOTHNESS_METRIC_TYPE:
      return []  # TODO(nduca): Hook up a real metric.
    raise Exception('Unrecognized metric type: %s' % request.metric_type)

  def AddResults(self, results):
    """Computes and adds results for every metric request in the trace.

    Raises:
      Exception: If the page issued no metric requests at all.
    """
    requests = self.FindMetricRequests()
    if not requests:  # Idiomatic emptiness test instead of len(...) == 0.
      raise Exception('Expected at least one request from the page')
    for request in requests:
      for metric in self.CreateMetricsForMetricRequest(request):
        metric.AddResults(self._model, self._renderer_thread,
                          request, results, self._detailed_mode)
| + |
| + |
class MetaMeasurement(page_measurement.PageMeasurement):
  """A measurement whose metrics are chosen by the page under test.

  Instead of the measurement having a fixed set of values it collects about
  the page, the page being tested issues a set of standard calls to the user
  timing API specifying time spans of interest and the kind of metrics it
  wants for each span. This measurement collects a trace together with those
  metric requests, then generates result values by matching the requests
  with the appropriate timeline metrics.
  """

  def __init__(self):
    super(MetaMeasurement, self).__init__('smoothness')

  def AddCommandLineOptions(self, parser):
    parser.add_option('--detailed-mode', '-d', action='store_true',
                      help='Report detailed results.')

  def CanRunForPage(self, page):
    # Only pages that declare a smoothness action can drive this measurement.
    return hasattr(page, 'smoothness')

  def WillNavigateToPage(self, page, tab):
    # Tracing must start before navigation so the request events are captured.
    if not tab.browser.supports_tracing:
      # More actionable than the original bare 'Not supported'.
      raise Exception('Meta measurement requires a browser that supports '
                      'tracing')
    tab.browser.StartTracing(timeline_module.MINIMAL_TRACE_CATEGORIES)

  def MeasurePage(self, page, tab, results):
    """Stops tracing and adds all page-requested metrics to |results|."""
    trace_result = tab.browser.StopTracing()
    model = trace_result.AsTimelineModel()
    renderer_thread = model.GetRendererThreadFromTab(tab)
    meta_metrics = _MetaMetrics(model, renderer_thread,
                                self.options.detailed_mode)
    meta_metrics.AddResults(results)