| Index: tracing/tracing/metrics/estimated_input_latency_metric.html
|
| diff --git a/tracing/tracing/metrics/estimated_input_latency_metric.html b/tracing/tracing/metrics/estimated_input_latency_metric.html
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..490280eeaa1ef9558442d2a33ac90e9dd19fb87b
|
| --- /dev/null
|
| +++ b/tracing/tracing/metrics/estimated_input_latency_metric.html
|
| @@ -0,0 +1,368 @@
|
| +<!DOCTYPE html>
|
| +<!--
|
| +Copyright 2016 The Chromium Authors. All rights reserved.
|
| +Use of this source code is governed by a BSD-style license that can be
|
| +found in the LICENSE file.
|
| +-->
|
| +
|
| +<link rel="import" href="/tracing/base/category_util.html">
|
| +<link rel="import" href="/tracing/base/statistics.html">
|
| +<link rel="import" href="/tracing/metrics/metric_registry.html">
|
| +<link rel="import" href="/tracing/metrics/system_health/loading_metric.html">
|
| +<link rel="import" href="/tracing/metrics/system_health/utils.html">
|
| +<link rel="import" href="/tracing/model/helpers/chrome_model_helper.html">
|
| +<link rel="import" href="/tracing/model/user_model/load_expectation.html">
|
| +<link rel="import" href="/tracing/value/histogram.html">
|
| +<link rel="import" href="/tracing/value/value_set.html">
|
| +
|
| +<script>
|
| +'use strict';
|
| +
|
| +tr.exportTo('tr.metrics', function() {
|
| +
|
// Trace category that marks top-level scheduler tasks.
var TOPLEVEL_CATEGORY_NAME = 'toplevel';

// Technically the smallest input latency could be zero, but zero is not
// handled well by exponential histograms.
var SMALLEST_INPUT_LATENCY_MS = 0.1;

// We expect input latencies to be generally (well) below 1000ms.
var LARGEST_INPUT_LATENCY_MS = 1000;

// Exponential bin layout used by the EQT 90th-percentile histogram below.
var EQT_90TH_PERCENTILE_BOUNDARIES =
    tr.v.HistogramBinBoundaries.createExponential(
        SMALLEST_INPUT_LATENCY_MS, LARGEST_INPUT_LATENCY_MS, 100);
|
| +
|
// TODO(dproy): Remove after we close #2784.
/**
 * Returns true iff |event| has the given |title| and |category| appears
 * among the event's trace categories (as split by tr.b.getCategoryParts).
 *
 * Always returns a proper boolean; the previous version leaked `undefined`
 * when the event had no category.
 *
 * @param {!tr.model.Event} event
 * @param {string} title
 * @param {string} category
 * @return {boolean}
 */
function hasTitleAndCategory(event, title, category) {
  if (event.title !== title) return false;
  // Events with no category string never match.
  if (!event.category) return false;
  return tr.b.getCategoryParts(event.category).includes(category);
}
|
| +
|
/**
 * Applies |cb| to |slice| when |predicate| holds for it but for none of its
 * descendants; otherwise recurses so that cb runs on every innermost
 * descendant satisfying the predicate. Returns whether cb ran on this slice
 * or on any slice below it.
 */
function runConditionallyOnInnermostDescendants(slice, predicate, cb) {
  // Visit every child; deliberately no short-circuit, since cb must run on
  // each innermost matching descendant, not just the first one found.
  let handledBelow = false;
  slice.subSlices.forEach(function(child) {
    if (runConditionallyOnInnermostDescendants(child, predicate, cb)) {
      handledBelow = true;
    }
  });

  if (handledBelow) return true;
  if (!predicate(slice)) return false;

  cb(slice);
  return true;
}
|
| +
|
/**
 * Invokes |cb| on each slice of |thread| whose trace categories include
 * 'toplevel' and that has no descendant slice in the 'toplevel' category.
 */
function forEachInnermostTopLevelSlices(thread, cb) {
  function isTopLevel(slice) {
    var parts = tr.b.getCategoryParts(slice.category);
    return parts.includes(TOPLEVEL_CATEGORY_NAME);
  }

  thread.sliceGroup.topLevelSlices.forEach(function(slice) {
    runConditionallyOnInnermostDescendants(slice, isTopLevel, cb);
  });
}
|
| +
|
/**
 * Returns the "interactive" timestamps of all navigations in |model|, in
 * ascending order, by running the loading metric and reading the TTI
 * diagnostics it records.
 */
function getSortedInteractiveTimestamps(model) {
  // TODO(dproy): When LoadExpectation v.1.0 is released, update this
  // function to use the new LoadExpectation rather than calling
  // loading_metric.html.
  var valueSet = new tr.v.ValueSet();
  tr.metrics.sh.loadingMetric(valueSet, model);
  var ttiValue = tr.b.getOnlyElement(
      valueSet.getValuesNamed('timeToFirstInteractive'));

  var timestamps = [];
  for (var bin of ttiValue.allBins) {
    for (var diagnostics of bin.diagnosticMaps) {
      timestamps.push(diagnostics.get('Navigation infos').value.interactive);
    }
  }
  timestamps.sort((a, b) => a - b);
  return timestamps;
}
|
| +
|
/**
 * Returns the start times of all navigationStart events on the renderer
 * main thread, in ascending order.
 */
function getAllNavStartTimesSorted(rendererHelper) {
  var starts = [];
  for (var event of rendererHelper.mainThread.sliceGroup.childEvents()) {
    var isNavStart =
        hasTitleAndCategory(event, 'navigationStart', 'blink.user_timing');
    if (isNavStart) starts.push(event.start);
  }
  starts.sort((a, b) => a - b);
  return starts;
}
|
| +
|
/**
 * Builds one task window per interactive timestamp: the range of time from
 * that TTI until either
 *
 * 1. The beginning of the next navigationStart event, or
 * 2. The end of the trace.
 *
 * @param {!Array.<number>} interactiveTimestamps - Sorted ascending.
 * @param {!Array.<number>} navStartTimestamps - Sorted ascending.
 * @param {!number} traceEndTs
 * @returns {!Array.<tr.b.Range>} Task windows, or [] when windows cannot be
 *     reliably attributed.
 */
function getTaskWindows(
    interactiveTimestamps, navStartTimestamps, traceEndTs) {
  var windows = [];
  var nextNavIndex = 0;
  var previousEndTs;

  for (var tti of interactiveTimestamps) {
    // Advance past navigation starts at or before this TTI; the window
    // closes at the first navigationStart strictly after it.
    while (nextNavIndex < navStartTimestamps.length &&
           navStartTimestamps[nextNavIndex] < tti) {
      nextNavIndex++;
    }

    var endTs = nextNavIndex === navStartTimestamps.length ?
        traceEndTs : navStartTimestamps[nextNavIndex];

    if (endTs === previousEndTs) {
      // Two interactive timestamps with no navigationStart between them can
      // only happen when two pages share the same renderer process (and
      // therefore the same renderer scheduler). Since tasks can currently
      // only be attributed to a whole renderer (not a specific page/frame),
      // no reasonable estimated input latency can be computed here.
      console.warn("Encountered two consecutive interactive timestamps " +
          "with no navigationStart between them. Aborting estimated " +
          "input latency calculation.");
      return [];
    }

    windows.push(tr.b.Range.fromExplicitRange(tti, endTs));
    previousEndTs = endTs;
  }
  return windows;
}
|
| +
|
/**
 * Returns estimated queueing times of a hypothetical input event at the
 * specified percentiles for a given set of main thread task durations.
 *
 * For example, if percentiles = [0.5, 0.9], we compute durations t1 and t2
 * such that for a hypothetical random input event arriving with uniform
 * probability in the window of interest, Probability(queueing time <= t1) =
 * 0.5 and Probability(queueing time <= t2) = 0.9. We assume any input event
 * is scheduled immediately after the currently executing task, with no other
 * input events queued ahead of it.
 *
 * If one of the durations overlaps the end of the window, the full duration
 * should be in the durations array, and the length not included within the
 * window given as `opt_clippedLength`. For instance, if a 50ms duration
 * occurs 10ms before the end of the window, 50 should be in `durations` and
 * `opt_clippedLength` should be 40.
 *
 * @see https://goo.gl/paVasr
 *
 * @param {!Array<number>} durations - Durations of toplevel tasks of the
 *     renderer main thread. Not modified by this function.
 * @param {number} totalTime - Total time (in ms) of the window for which
 *     we're calculating estimated input latency.
 * @param {!Array<number>} percentiles - Percentiles of interest in [0, 1].
 *     Not modified by this function.
 * @param {number=} opt_clippedLength - Length clipped from a duration
 *     overlapping the end of the window. Default of 0.
 * @return {!Map<number, number>} Map of percentile to EQT at that percentile.
 */
function calculateEILRiskPercentiles(
    durations, totalTime, percentiles, opt_clippedLength) {
  // Sort copies so the caller-owned arrays are not mutated in place.
  var sortedDurations = durations.slice().sort((a, b) => a - b);
  var sortedPercentiles = percentiles.slice().sort((a, b) => a - b);
  var clippedLength = opt_clippedLength || 0;
  var results = new Map();

  // We work with the cumulative distribution function (CDF) of estimated
  // input latency, i.e. CDF(t) = Probability(estimated input latency <= t).
  // The CDF increases from 0 to 1 as we process the task durations in the
  // window. Refer to the design doc for more detailed descriptions of the
  // concepts and the algorithm used here: https://goo.gl/paVasr.

  // First we process the free time in the window: idle time contributes
  // zero queueing latency.
  var busyTime = tr.b.Statistics.sum(sortedDurations) - clippedLength;
  var freeTime = totalTime - busyTime;
  var currCDF = freeTime / totalTime;
  var percentileIndex = 0;

  // Bounds check comes first so we never read past the end of the array.
  while (percentileIndex < sortedPercentiles.length &&
         sortedPercentiles[percentileIndex] <= currCDF) {
    // If currCDF already crossed a percentile value, the estimated input
    // latency at this percentile is zero.
    results.set(sortedPercentiles[percentileIndex], 0);
    percentileIndex++;
  }

  if (percentileIndex === sortedPercentiles.length) return results;

  var prevDuration = 0;
  var prevCDF = currCDF;
  var durationIndex = 0;

  while (durationIndex < sortedDurations.length) {
    var remainingCount = sortedDurations.length - durationIndex;

    // When processing the durations in ascending order, we make an extra
    // stop at `clippedLength`.
    var currDuration;
    var nextSmallestTask = sortedDurations[durationIndex];
    if (prevDuration < clippedLength && nextSmallestTask >= clippedLength) {
      currDuration = clippedLength;
    } else {
      currDuration = nextSmallestTask;
      durationIndex++;
    }

    var durationDelta = currDuration - prevDuration;

    // All the remaining tasks have a chunk of at least width `durationDelta`
    // in them, where estimated input latency goes linearly from
    // `prevDuration` to `currDuration`. We process all such chunks at once.
    // For the last task in the window, this chunk could be outside the
    // window border (i.e. in the clipped region), in which case we do not
    // count that chunk. This happens exactly when the current duration
    // we're processing is at most clippedLength.
    var numChunks =
        currDuration <= clippedLength ? remainingCount - 1 : remainingCount;

    var cdfDelta = durationDelta * numChunks / totalTime;
    currCDF = prevCDF + cdfDelta;

    while (percentileIndex < sortedPercentiles.length &&
           sortedPercentiles[percentileIndex] <= currCDF) {
      // The CDF has increased linearly between prevCDF and currCDF, so the
      // precise latency at this percentile follows by linear interpolation.
      var percentile = sortedPercentiles[percentileIndex];
      var latency = (percentile - prevCDF) / cdfDelta * durationDelta +
          prevDuration;
      results.set(percentile, latency);
      percentileIndex++;
    }

    if (percentileIndex === sortedPercentiles.length) break;

    prevDuration = currDuration;
    prevCDF = currCDF;
  }

  return results;
}
|
| +
|
/**
 * Computes estimated queueing times of a hypothetical input event at the
 * specified |percentiles| from the renderer main thread tasks inside
 * |windowOfInterest|.
 *
 * @param {!Array<number>} percentiles - Percentiles of interest.
 * @param {!tr.model.helper.ChromeRendererHelper} rendererHelper - Helper for
 *     the target renderer.
 * @param {!tr.b.Range} windowOfInterest - Window from which tasks are taken
 *     when calculating estimated input latency.
 * @return {!Map<number, number>} Map of percentile to EQT at that percentile.
 */
function getEQTPercentilesForWindow(
    percentiles, rendererHelper, windowOfInterest) {
  var taskDurations = [];
  var clippedLength = 0;

  forEachInnermostTopLevelSlices(rendererHelper.mainThread, function(slice) {
    // Ignore tasks entirely outside the window.
    if (!windowOfInterest.intersectsRangeInclusive(slice.range)) return;

    // The part of a task before the window cannot delay input arriving
    // inside the window, so measure from the window start.
    var effectiveStart = Math.max(windowOfInterest.min, slice.start);
    var duration = slice.end - effectiveStart;

    if (slice.end > windowOfInterest.max) {
      // A task running past the window end still delays input that lands
      // inside the window, so the overhang must be accounted for
      // separately. Consider a 200ms task starting 100ms before the window
      // end: input landing 50ms into the task waits 150ms, not 50ms.
      // Clipping the task on the right would lose this information. At most
      // one slice can be clipped on the right.
      clippedLength =
          duration - (windowOfInterest.max - effectiveStart);
    }

    taskDurations.push(duration);
  });

  return calculateEILRiskPercentiles(
      taskDurations, windowOfInterest.duration, percentiles, clippedLength);
}
|
| +
|
/**
 * Calculates the Estimated Input Event Latency metric.
 *
 * The definition of estimated input latency has not solidified yet; see
 * https://goo.gl/1VWu5Z for the definitions currently in consideration.
 * While prototyping, this function reports the less ambiguously named
 * EQT90thPercentile: the Estimated Queueing Time of a uniformly distributed
 * random input event arriving after the page has become "interactive",
 * assuming the event is handled immediately after the currently running
 * renderer main thread task finishes.
 *
 * @param {!tr.v.ValueSet} values
 * @param {!tr.model.Model} model
 */
function estimatedInputLatencyMetric(values, model) {
  var chromeHelper = model.getOrCreateHelper(
      tr.model.helpers.ChromeModelHelper);
  // The largest-pid renderer is assumed to be the one of interest.
  var rendererHelper = chromeHelper.rendererWithLargestPid;

  var interactiveTimestamps = getSortedInteractiveTimestamps(model);

  // Assumes child iframes never emit navigationStart events.
  var navStartTimestamps = getAllNavStartTimesSorted(rendererHelper);
  var taskWindows = getTaskWindows(
      interactiveTimestamps,
      navStartTimestamps,
      rendererHelper.mainThread.bounds.max);

  var histogram = new tr.v.Histogram(
      'Expected Queueing Time 90th Percentile',
      tr.b.Unit.byName.timeDurationInMs_smallerIsBetter,
      EQT_90TH_PERCENTILE_BOUNDARIES);

  // Only the 90th percentile is computed for now.
  var targetPercentiles = [0.9];
  taskWindows.forEach(function(taskWindow) {
    var eqt90th = getEQTPercentilesForWindow(
        targetPercentiles, rendererHelper, taskWindow).get(0.9);
    if (eqt90th !== undefined) histogram.addSample(eqt90th);
  });

  values.addHistogram(histogram);
}
|
| +
|
| + tr.metrics.MetricRegistry.register(estimatedInputLatencyMetric, {
|
| + supportsRangeOfInterest: false
|
| + });
|
| +
|
| + return {
|
| + estimatedInputLatencyMetric: estimatedInputLatencyMetric,
|
| + calculateEILRiskPercentiles: calculateEILRiskPercentiles,
|
| + getEQTPercentilesForWindow: getEQTPercentilesForWindow
|
| + };
|
| +});
|
| +</script>
|
|
|