| Index: tools/telemetry/support/html_output/results-template.html
 | 
| diff --git a/tools/telemetry/support/html_output/results-template.html b/tools/telemetry/support/html_output/results-template.html
 | 
| index 28c2857837575da8ec0261e971759bbfd9b2cd6e..c61e8771fa47eef77ae8af0a4f0006e5ba453535 100644
 | 
| --- a/tools/telemetry/support/html_output/results-template.html
 | 
| +++ b/tools/telemetry/support/html_output/results-template.html
 | 
| @@ -223,6 +223,45 @@ function TestResult(metric, values, associatedRun) {
 | 
|      this.run = function () { return associatedRun; }
 | 
|  }
 | 
|  
 | 
| +// A comparison statistic is the fractional change between the reference result
 | 
| +// and the comparison result.
 | 
| +function TestComparisonResult(metric, referenceResult, comparisonResult, associatedRun) {
 | 
| +    var stddev = function(result) {
 | 
| +        var values = result.values();
 | 
| +        return Statistics.sampleStandardDeviation(
 | 
| +            values.length, Statistics.sum(values), Statistics.squareSum(values));
 | 
| +    }
 | 
| +    var referenceStddev = stddev(referenceResult);
 | 
| +    var compareStddev = stddev(comparisonResult);
 | 
| +
 | 
| +    var meanCompare =
 | 
| +        ((comparisonResult.unscaledMean() - referenceResult.unscaledMean()) /
 | 
| +         referenceResult.unscaledMean());
 | 
| +    // Formula is |(comp - ref)/ref| = |comp/ref - 1|
 | 
| +    // SD(comp/ref - 1) = SD(comp/ref)
 | 
| +    // If R = Y/X, SD(R)/R = SD(Y)/Y + SD(X)/X
 | 
| +    var stddevCompare = (comparisonResult.unscaledMean() / referenceResult.unscaledMean()) *
 | 
| +        (compareStddev / comparisonResult.unscaledMean() +
 | 
| +         referenceStddev / referenceResult.unscaledMean());
 | 
| +
 | 
| +    this.test = function () { return metric; }
 | 
| +    this.values = function () { return null; }
 | 
| +    this.unscaledMean = function () { return meanCompare; }
 | 
| +    this.mean = function () { return metric.scalingFactor() * this.unscaledMean(); }
 | 
| +    this.min = function () { return null; }
 | 
| +    this.max = function () { return null; }
 | 
| +    this.confidenceIntervalDelta = function () {
 | 
| +        return metric.scalingFactor() * 2.0 * stddevCompare;
 | 
| +    }
 | 
| +    this.confidenceIntervalDeltaRatio = function () { return this.confidenceIntervalDelta() / this.mean(); }
 | 
| +    this.percentDifference = function(other) { return (other.unscaledMean() - this.unscaledMean()) / this.unscaledMean(); }
 | 
| +    this.isStatisticallySignificant = function (other) {
 | 
| +        var diff = Math.abs(other.mean() - this.mean());
 | 
| +        return diff > this.confidenceIntervalDelta() && diff > other.confidenceIntervalDelta();
 | 
| +    }
 | 
| +    this.run = function () { return associatedRun; }
 | 
| +}
 | 
| +
 | 
|  function TestRun(entry) {
 | 
|      this.id = function() { return entry['buildTime']; }
 | 
|      this.revision = function () { return entry['revision']; }
 | 
| @@ -270,6 +309,9 @@ function PerfTestMetric(name, metric, unit, isImportant) {
 | 
|          } else if (mean > 10 * kilo) {
 | 
|              cachedScalingFactor = 1 / kilo;
 | 
|              cachedUnit = unit == 'ms' ? 's' : ('K ' + unit);
 | 
| +        } else if (unit == 'fraction') {
 | 
| +            cachedScalingFactor = 100;
 | 
| +            cachedUnit = 'percent';
 | 
|          } else {
 | 
|              cachedScalingFactor = 1;
 | 
|              cachedUnit = unit;
 | 
| @@ -348,6 +390,18 @@ var subpointsPlotOptions = {
 | 
|      points: {show: true, radius: 1},
 | 
|      bars: {show: false}};
 | 
|  
 | 
| +var comparisonPlotOptions = {
 | 
| +    color: plotColor,
 | 
| +    lines: {show: false},
 | 
| +    points: {
 | 
| +        show: true,
 | 
| +        radius: 1,
 | 
| +        errorbars: "y",
 | 
| +        yerr: {show: true, upperCap: "-", lowerCap: "-", radius: 5}
 | 
| +    },
 | 
| +    bars: {show: false}
 | 
| +};
 | 
| +
 | 
|  var mainPlotOptions = {
 | 
|      xaxis: {
 | 
|          min: -0.5,
 | 
| @@ -479,16 +533,44 @@ function attachLinePlotLabels(test, container) {
 | 
|  function attachPlot(test, plotContainer, minIsZero) {
 | 
|      var results = test.results();
 | 
|  
 | 
| +    // Actual values
 | 
|      var values = results.reduce(function (values, result, index) {
 | 
|          var newValues = result.values();
 | 
|          return newValues ? values.concat(newValues.map(function (value) { return [index, value]; })) : values;
 | 
|      }, []);
 | 
|  
 | 
|      var plotData = [$.extend(true, {}, subpointsPlotOptions, {data: values})];
 | 
| -    plotData.push({id: 'μ', data: results.map(function (result, index) { return [index, result.mean()]; }), color: plotColor});
 | 
|  
 | 
| -    var overallMax = Statistics.max(results.map(function (result, index) { return result.max(); }));
 | 
| -    var overallMin = Statistics.min(results.map(function (result, index) { return result.min(); }));
 | 
| +    // Means for actual values.
 | 
| +    plotData.push({
 | 
| +        id: 'μ',
 | 
| +        data: results.reduce(function (values, result, index) {
 | 
| +            if (result.values()) {
 | 
| +                values.push([index, result.mean()]);
 | 
| +            }
 | 
| +            return values;
 | 
| +        }, []),
 | 
| +        color: plotColor});
 | 
| +
 | 
| +    // Comparison values with error bars.
 | 
| +    var comparisonValues = results.reduce(function(pointList, result, index) {
 | 
| +        return result.values() ? pointList : pointList.concat(
 | 
| +            [[index, result.mean(), result.confidenceIntervalDelta()]]);
 | 
| +    }, []);
 | 
| +    plotData.push($.extend(true, {}, comparisonPlotOptions, {data: comparisonValues}));
 | 
| +
 | 
| +    var overallMax = Statistics.max(results.map(function (result, index) {
 | 
| +        return result.values() ? result.max() : result.mean() + result.confidenceIntervalDelta();
 | 
| +    }));
 | 
| +    var overallMin = Statistics.min(results.map(function (result, index) {
 | 
| +        return result.values() ? result.min() : result.mean() - result.confidenceIntervalDelta();
 | 
| +    }));
 | 
| +    // For minIsZero == true, percents are shown on a fixed 0-100 scale.
 | 
| +    // Otherwise the y-axis is scaled to fit the data (with margin) below.
 | 
| +    if (minIsZero && test.unit() == 'percent') {
 | 
| +        overallMax = 100;
 | 
| +    }
 | 
| +
 | 
|      var margin = (overallMax - overallMin) * 0.1;
 | 
|      var currentPlotOptions = $.extend(true, {}, mainPlotOptions, {yaxis: {
 | 
|          min: minIsZero ? 0 : overallMin - margin,
 | 
| @@ -640,8 +722,12 @@ function createTableRow(runs, test, referenceIndex, useLargeLinePlots) {
 | 
|              }
 | 
|          }
 | 
|  
 | 
| -        var statistics = 'σ=' + toFixedWidthPrecision(result.confidenceIntervalDelta()) + ', min=' + toFixedWidthPrecision(result.min())
 | 
| -            + ', max=' + toFixedWidthPrecision(result.max()) + '\n' + regressionAnalysis;
 | 
| +        var statistics = 'σ=' + toFixedWidthPrecision(result.confidenceIntervalDelta());
 | 
| +        if (result.min() !== null)
 | 
| +            statistics += ', min=' + toFixedWidthPrecision(result.min());
 | 
| +        if (result.max() !== null)
 | 
| +            statistics += ', max=' + toFixedWidthPrecision(result.max());
 | 
| +        statistics += '\n' + regressionAnalysis;
 | 
|  
 | 
|          // Tablesorter doesn't know about the second cell so put the comparison in the invisible element.
 | 
|          return '<td class="result" title="' + statistics + '">' + toFixedWidthPrecision(result.mean()) + hiddenValue
 | 
| @@ -740,6 +826,7 @@ function init() {
 | 
|              for (var testName in tests) {
 | 
|                  var rawMetrics = tests[testName].metrics;
 | 
|  
 | 
| +                var baseResults = {};
 | 
|                  for (var metricName in rawMetrics) {
 | 
|                      var fullMetricName = testName + ':' + metricName;
 | 
|                      var metric = metrics[fullMetricName];
 | 
| @@ -747,7 +834,26 @@ function init() {
 | 
|                          metric = new PerfTestMetric(testName, metricName, rawMetrics[metricName].units, rawMetrics[metricName].important);
 | 
|                          metrics[fullMetricName] = metric;
 | 
|                      }
 | 
| -                    metric.addResult(new TestResult(metric, rawMetrics[metricName].current, run));
 | 
| +                    if ('current' in rawMetrics[metricName]) {
 | 
| +                        // Regular metric; add the result now.
 | 
| +                        var result = new TestResult(metric, rawMetrics[metricName].current, run);
 | 
| +                        baseResults[metricName] = result;
 | 
| +                        metric.addResult(result);
 | 
| +                    }
 | 
| +                }
 | 
| +                // Handle comparison metrics, which reference two base metrics.
 | 
| +                for (var metricName in rawMetrics) {
 | 
| +                    var fullMetricName = testName + ':' + metricName;
 | 
| +
 | 
| +                    if ('current' in rawMetrics[metricName])
 | 
| +                        continue;
 | 
| +
 | 
| +                    var referenceResult = baseResults[rawMetrics[metricName].reference_statistic];
 | 
| +                    var comparisonResult = baseResults[rawMetrics[metricName].comparison_statistic];
 | 
| +                    if (!referenceResult || !comparisonResult) continue;
 | 
| +
 | 
| +                    var metric = metrics[fullMetricName];
 | 
| +                    metric.addResult(new TestComparisonResult(metric, referenceResult, comparisonResult, run));
 | 
|                  }
 | 
|              }
 | 
|          }
 | 
| 
 |