Index: tools/telemetry/telemetry/web_perf/metrics/smoothness.py
diff --git a/tools/telemetry/telemetry/web_perf/metrics/smoothness.py b/tools/telemetry/telemetry/web_perf/metrics/smoothness.py
index 9a49ed0243bb264d052465d647eb3eb23de98aff..6bde6d6bb714ed3c88022d525879b3148bb709fd 100644
--- a/tools/telemetry/telemetry/web_perf/metrics/smoothness.py
+++ b/tools/telemetry/telemetry/web_perf/metrics/smoothness.py
@@ -34,7 +34,7 @@ class SmoothnessMetric(timeline_based_metric.TimelineBasedMetric):
         results.current_page, 'input_event_latency_discrepancy', 'ms',
         round(input_event_latency_discrepancy, 4)))
 
-    # List of queueing durations
+    # List of queueing durations.
     frame_queueing_durations = FlattenList(stats.frame_queueing_durations)
     if frame_queueing_durations:
      results.AddValue(list_of_scalar_values.ListOfScalarValues(
@@ -44,30 +44,45 @@ class SmoothnessMetric(timeline_based_metric.TimelineBasedMetric):
     # List of raw frame times.
     frame_times = FlattenList(stats.frame_times)
     results.AddValue(list_of_scalar_values.ListOfScalarValues(
-        results.current_page, 'frame_times', 'ms', frame_times))
+        results.current_page, 'frame_times', 'ms', frame_times,
+        description='List of raw frame times, helpful to understand the other '
+                    'metrics.'))
 
     # Arithmetic mean of frame times.
     mean_frame_time = statistics.ArithmeticMean(frame_times)
     results.AddValue(scalar.ScalarValue(
         results.current_page, 'mean_frame_time', 'ms',
-        round(mean_frame_time, 3)))
+        round(mean_frame_time, 3),
+        description='Arithmetic mean of frame times.'))
 
     # Absolute discrepancy of frame time stamps.
     frame_discrepancy = statistics.TimestampsDiscrepancy(
         stats.frame_timestamps)
     results.AddValue(scalar.ScalarValue(
-        results.current_page, 'jank', 'ms', round(frame_discrepancy, 4)))
+        results.current_page, 'jank', 'ms', round(frame_discrepancy, 4),
+        description='Absolute discrepancy of frame time stamps, where '
+                    'discrepancy is a measure of irregularity. It quantifies '
+                    'the worst jank. For a single pause, discrepancy '
+                    'corresponds to the length of this pause in milliseconds. '
+                    'Consecutive pauses increase the discrepancy. This metric '
+                    'is important because even if the mean and 95th '
+                    'percentile are good, one long pause in the middle of an '
+                    'interaction is still bad.'))
 
     # Are we hitting 60 fps for 95 percent of all frames?
     # We use 19ms as a somewhat looser threshold, instead of 1000.0/60.0.
     percentile_95 = statistics.Percentile(frame_times, 95.0)
     results.AddValue(scalar.ScalarValue(
         results.current_page, 'mostly_smooth', 'score',
-        1.0 if percentile_95 < 19.0 else 0.0))
+        1.0 if percentile_95 < 19.0 else 0.0,
+        description='Were 95 percent of the frames hitting 60 fps? '
+                    'Boolean value (1/0).'))
 
     # Mean percentage of pixels approximated (missing tiles, low resolution
-    # tiles, non-ideal resolution tiles)
+    # tiles, non-ideal resolution tiles).
     results.AddValue(scalar.ScalarValue(
         results.current_page, 'mean_pixels_approximated', 'percent',
         round(statistics.ArithmeticMean(
-            FlattenList(stats.approximated_pixel_percentages)), 3)))
+            FlattenList(stats.approximated_pixel_percentages)), 3),
+        description='Percentage of pixels that were approximated '
+                    '(checkerboarding, low-resolution tiles, etc.).'))
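
The new 'jank' description above is the subtle one, so here is a minimal,
self-contained sketch of its intuition in plain Python. worst_pause_ms is a
hypothetical toy proxy invented for illustration; the real value comes from
statistics.TimestampsDiscrepancy, which also accumulates consecutive pauses
instead of looking only at the single worst gap.

def worst_pause_ms(frame_timestamps_ms):
  # Longest gap between consecutive frame timestamps, in ms. For a trace
  # with a single long pause this matches the description above (discrepancy
  # roughly equals the pause length), but unlike the real discrepancy it
  # cannot see the combined effect of several shorter pauses in a row.
  gaps = [b - a for a, b in zip(frame_timestamps_ms, frame_timestamps_ms[1:])]
  return max(gaps) if gaps else 0.0

# Ten frames at 60 fps, then one 100 ms pause.
timestamps = [i * 16.7 for i in range(10)]
timestamps.append(timestamps[-1] + 100.0)
print(worst_pause_ms(timestamps))  # 100.0 (up to float rounding)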
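A companion sketch for the mostly_smooth rule, again in plain Python.
percentile is a hypothetical stand-in for statistics.Percentile (telemetry's
exact interpolation rule may differ); the 19 ms threshold and the 1/0 score
are taken from the patch itself.

import math

def percentile(values, p):
  # Linear interpolation between closest ranks, 0.0 <= p <= 100.0.
  ordered = sorted(values)
  k = (len(ordered) - 1) * (p / 100.0)
  lo, hi = int(math.floor(k)), int(math.ceil(k))
  if lo == hi:
    return ordered[lo]
  return ordered[lo] * (hi - k) + ordered[hi] * (k - lo)

def mostly_smooth_score(frame_times_ms):
  # Same rule as above: 1.0 if the 95th-percentile frame time is under the
  # loosened 19 ms threshold (rather than 1000.0 / 60.0), else 0.0.
  return 1.0 if percentile(frame_times_ms, 95.0) < 19.0 else 0.0

# 100 frames at 60 fps vs. the same trace with one 100 ms frame: the mean
# (~17.5 ms) and the 95th percentile (16.7 ms) both still look healthy,
# which is exactly the blind spot the 'jank' description warns about.
steady = [16.7] * 100
janky = [16.7] * 99 + [100.0]
print(mostly_smooth_score(steady), mostly_smooth_score(janky))  # 1.0 1.0
print(sum(janky) / len(janky))  # ~17.53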