Chromium Code Reviews| Index: tracing/tracing/metrics/v8/runtime_stats_metric.html |
| diff --git a/tracing/tracing/metrics/v8/runtime_stats_metric.html b/tracing/tracing/metrics/v8/runtime_stats_metric.html |
| index 546af9daa3034938d39d42182d0d7bbfc2018849..76d30e52f8baa64e03e61b5a0a8af497a508f915 100644 |
| --- a/tracing/tracing/metrics/v8/runtime_stats_metric.html |
| +++ b/tracing/tracing/metrics/v8/runtime_stats_metric.html |
| @@ -81,10 +81,9 @@ tr.exportTo('tr.metrics.v8', function() { |
| tr.b.UnitPrefixScale.METRIC.MICRO, tr.b.UnitPrefixScale.METRIC.MILLI); |
| } |
| - function computeRuntimeStats(values, model, endTime) { |
| - var slices = [...model.getDescendantEvents()].filter(event => |
| - event instanceof tr.e.v8.V8ThreadSlice && |
| - event.start <= endTime); |
| + // TODO(crbug.com/688342): Remove this function when runtimeStatsMetric is |
| + // removed. |
| + function computeRuntimeStats(histograms, slices) { |
| var runtimeGroupCollection = new tr.e.v8.RuntimeStatsGroupCollection(); |
| runtimeGroupCollection.addSlices(slices); |
| @@ -96,12 +95,12 @@ tr.exportTo('tr.metrics.v8', function() { |
| var durationSampleHistogram = createDurationHistogram_(entry.name); |
| durationSampleHistogram.addSample(convertMicroToMilli_(entry.time)); |
| durationSamples.set(entry.name + ':duration', durationSampleHistogram); |
| - values.addHistogram(durationSampleHistogram); |
| + histograms.addHistogram(durationSampleHistogram); |
| var countSampleHistogram = createCountHistogram_(entry.name); |
| countSampleHistogram.addSample(entry.count); |
| countSamples.set(entry.name + ':count', countSampleHistogram); |
| - values.addHistogram(countSampleHistogram); |
| + histograms.addHistogram(countSampleHistogram); |
| } |
| var durationHistogram = createDurationHistogram_(runtimeGroup.name); |
| @@ -113,22 +112,87 @@ tr.exportTo('tr.metrics.v8', function() { |
| samples: countSamples |
| }); |
| - values.addHistogram(durationHistogram); |
| - values.addHistogram(countHistogram); |
| + histograms.addHistogram(durationHistogram); |
| + histograms.addHistogram(countHistogram); |
| } |
| } |
| - function runtimeStatsMetric(values, model) { |
| + // TODO(crbug.com/688342): Remove this metric and use runtimeStatsTotalMetric |
| + // instead when the runtimeStatsTotalMetric is stable. |
| + function runtimeStatsMetric(histograms, model) { |
| var interactiveTime = computeInteractiveTime_(model); |
| var domContentLoadedTime = computeDomContentLoadedTime_(model); |
| var endTime = Math.max(interactiveTime, domContentLoadedTime); |
| - computeRuntimeStats(values, model, endTime); |
| + var slices = [...model.getDescendantEvents()].filter(event => |
| + event instanceof tr.e.v8.V8ThreadSlice && event.start <= endTime); |
| + computeRuntimeStats(histograms, slices); |
| + } |
| + |
| + function computeRuntimeStatsBucketOnUE(histograms, slices, |
| + histogramNamePrefix) { |
| + var runtimeGroupCollection = new tr.e.v8.RuntimeStatsGroupCollection(); |
| + runtimeGroupCollection.addSlices(slices); |
| + |
| + for (var runtimeGroup of runtimeGroupCollection.runtimeGroups) { |
| + var histogramName = histogramNamePrefix + '_' + runtimeGroup.name; |
| + var durationHistogram = createDurationHistogram_(histogramName); |
| + durationHistogram.addSample(convertMicroToMilli_(runtimeGroup.time)); |
| + histograms.addHistogram(durationHistogram); |
| + |
| + var countHistogram = createCountHistogram_(histogramName); |
| + countHistogram.addSample(runtimeGroup.count); |
| + histograms.addHistogram(countHistogram); |
| + } |
| + } |
| + |
| + function runtimeStatsTotalMetric(histograms, model) { |
| + var v8ThreadSlices = [...model.getDescendantEvents()].filter(event => |
| + event instanceof tr.e.v8.V8ThreadSlice).sort((e1, e2) => |
| + e1.start - e2.start); |
| + var v8SlicesBucketOnUEMap = new Map(); |
| + // User expectations can sometimes overlap. So, certain v8 slices can be |
| + // included in more than one expectation. We count such slices in each |
| + // of the expectations. This is done so as to minimize the noise due to |
| + // the differences in the extent of overlap between the runs. |
| + for (var expectation of model.userModel.expectations) { |
| + var slices = expectation.range.filterArray(v8ThreadSlices, |
| + event => event.start); |
| + if (slices.length === 0) continue; |
| + // filterArray filters the array that intersects the range inclusively. |
| + // Expectations are not inclusive i.e. expectations are like [0, 1), |
| + // [1, 2). v8ThreadSlices that start at 1 should be counted only in [1,2) |
| + // bucket. Filter out the sample at the boundary so that it is not counted |
| + // twice. |
| + var lastSlice = slices[slices.length - 1]; |
| + if (!expectation.range.intersectsRangeExclusive(lastSlice.range)) { |
| + slices.pop(); |
| + } |
| + |
| + if (v8SlicesBucketOnUEMap.get(expectation.stageTitle) === undefined) { |
| + v8SlicesBucketOnUEMap.set(expectation.stageTitle, slices); |
| + } else { |
| + var totalSlices = v8SlicesBucketOnUEMap.get(expectation.stageTitle) |
| + .concat(slices); |
| + v8SlicesBucketOnUEMap.set(expectation.stageTitle, totalSlices); |
| + } |
| + } |
| + |
| + // Add histograms for each UE. |
| + for (var [name, slices] of v8SlicesBucketOnUEMap.entries()) { |
| + computeRuntimeStatsBucketOnUE(histograms, slices, name); |
| + } |
| + // Also compute the metric that includes all of the samples. The values |
| + // in UE buckets do not add up to the total of all samples, since we |
| + // duplicate some of the samples in multiple buckets when the UEs overlap. |
| + computeRuntimeStatsBucketOnUE(histograms, v8ThreadSlices, 'Any'); |
|
benjhayden
2017/02/13 20:40:30
Fadi and I were looking at results_rcs_gc.html and
|
| } |
| + tr.metrics.MetricRegistry.register(runtimeStatsTotalMetric); |
| tr.metrics.MetricRegistry.register(runtimeStatsMetric); |
| return { |
| runtimeStatsMetric, |
| + runtimeStatsTotalMetric, |
| }; |
| }); |
| </script> |