Chromium Code Reviews| Index: tools/telemetry/telemetry/web_perf/metrics/smoothness.py |
| diff --git a/tools/telemetry/telemetry/web_perf/metrics/smoothness.py b/tools/telemetry/telemetry/web_perf/metrics/smoothness.py |
| index c294db824e9ca2fbeae60f1ddebb76e3f55a06d2..26c56de7db2cf66c0feacd7fed63662fc2004cbf 100644 |
| --- a/tools/telemetry/telemetry/web_perf/metrics/smoothness.py |
| +++ b/tools/telemetry/telemetry/web_perf/metrics/smoothness.py |
| @@ -20,58 +20,124 @@ class SmoothnessMetric(timeline_based_metric.TimelineBasedMetric): |
| stats = rendering_stats.RenderingStats( |
| renderer_process, model.browser_process, |
| [r.GetBounds() for r in interaction_records]) |
| + self.PopulateResultsFromStats(results, stats) |
| - input_event_latency = FlattenList(stats.input_event_latency) |
| - if input_event_latency: |
| - mean_input_event_latency = statistics.ArithmeticMean( |
| - input_event_latency) |
| - input_event_latency_discrepancy = statistics.DurationsDiscrepancy( |
| - input_event_latency) |
| - results.AddValue(scalar.ScalarValue( |
| - results.current_page, 'mean_input_event_latency', 'ms', |
| - round(mean_input_event_latency, 3))) |
| - results.AddValue(scalar.ScalarValue( |
| - results.current_page, 'input_event_latency_discrepancy', 'ms', |
| - round(input_event_latency_discrepancy, 4))) |
| - scroll_update_latency = FlattenList(stats.scroll_update_latency) |
| - if scroll_update_latency: |
| - mean_scroll_update_latency = statistics.ArithmeticMean( |
| - scroll_update_latency) |
| - scroll_update_latency_discrepancy = statistics.DurationsDiscrepancy( |
| - scroll_update_latency) |
| - results.AddValue(scalar.ScalarValue( |
| - results.current_page, 'mean_scroll_update_latency', 'ms', |
| - round(mean_scroll_update_latency, 3))) |
| - results.AddValue(scalar.ScalarValue( |
| - results.current_page, 'scroll_update_latency_discrepancy', 'ms', |
| - round(scroll_update_latency_discrepancy, 4))) |
| + def PopulateResultsFromStats(self, results, stats): |
| + none_value_reason = self.GetNoneValueReason(stats.frame_timestamps) |
| + page = results.current_page |
| + values = [ |
| + self.GetLatencyMetricValues(page, 'input_event_latency', |
| + stats.input_event_latency, |
| + none_value_reason), |
| + self.GetLatencyMetricValues(page, 'scroll_update_latency', |
| + stats.scroll_update_latency, |
| + none_value_reason), |
| + self.GetQueueingDurationValue( |
| + page, stats.frame_queueing_durations, none_value_reason), |
| + self.GetFrameTimeMetricValues( |
| + page, stats.frame_times, none_value_reason), |
| + self.GetFrameTimeDiscrepancyValue( |
| + page, stats.frame_timestamps, none_value_reason), |
| + self.GetMeanPixelsApproximatedValue( |
| + page, stats.approximated_pixel_percentages, none_value_reason) |
| + ] |
| + for v in FlattenList(values): |
| + results.AddValue(v) |
| - # List of queueing durations. |
| - frame_queueing_durations = FlattenList(stats.frame_queueing_durations) |
| - if frame_queueing_durations: |
| - results.AddValue(list_of_scalar_values.ListOfScalarValues( |
| - results.current_page, 'queueing_durations', 'ms', |
| - frame_queueing_durations)) |
| + def GetNoneValueReason(self, list_of_frame_timestamp_lists): |
|
chrishenry
2014/08/15 04:43:21
Maybe make this return boolean and name it somethi
ariblue
2014/08/15 22:10:26
Done.
|
| + """Set whether we have collected at least two frames in every range. |
| - # List of raw frame times. |
| - frame_times = FlattenList(stats.frame_times) |
| - results.AddValue(list_of_scalar_values.ListOfScalarValues( |
| - results.current_page, 'frame_times', 'ms', frame_times, |
| - description='List of raw frame times, helpful to understand the other ' |
| - 'metrics.')) |
| + If not, we can't compute meaningful smoothness metrics. Some issues that |
| + have caused this in the past include: |
| + - Browser bugs that prevent the page from redrawing |
| + - Bugs in the synthetic gesture code |
| + - Page and benchmark out of sync (e.g. clicked element was renamed) |
| + - Pages that render extremely slowly |
| + - Pages that can't be scrolled |
| + """ |
| + not_enough_frames = any(len(s) < 2 for s in list_of_frame_timestamp_lists) |
|
chrishenry
2014/08/15 04:43:21
So we aren't going to try to compute anything if a
ariblue
2014/08/15 22:10:26
Done. Added a doc for the SmoothnessMetric Class.
|
| + if not_enough_frames: |
| + return "Not enough frames for smoothness metrics." |
|
chrishenry
2014/08/15 04:43:20
Single quote string.
ariblue
2014/08/15 22:10:26
Done.
|
| + return None |
| - # Arithmetic mean of frame times. |
| - mean_frame_time = statistics.ArithmeticMean(frame_times) |
| - results.AddValue(scalar.ScalarValue( |
| - results.current_page, 'mean_frame_time', 'ms', |
| - round(mean_frame_time, 3), |
| - description='Arithmetic mean of frame times.')) |
| + def GetLatencyMetricValues(self, page, name, list_of_latency_lists, |
|
chrishenry
2014/08/15 04:43:21
ComputeLatencyMetric
ariblue
2014/08/15 22:10:26
Done.
|
| + none_value_reason): |
|
chrishenry
2014/08/15 04:43:21
Instead of passing none_value_reason, maybe better
ariblue
2014/08/15 22:10:26
Done.
|
| + """Adds the mean and discrepancy for given latency stats to results.""" |
| + mean_latency = None |
| + latency_discrepancy = None |
| + latency_none_value_reason = none_value_reason |
| + if none_value_reason is None: |
| + latency_list = FlattenList(list_of_latency_lists) |
| + if len(latency_list) == 0: |
| + latency_none_value_reason = 'No latency values recorded.' |
| + else: |
| + mean_latency = round(statistics.ArithmeticMean(latency_list), 3) |
| + latency_discrepancy = ( |
| + round(statistics.DurationsDiscrepancy(latency_list), 4)) |
| + return [ |
| + scalar.ScalarValue( |
| + page, 'mean_%s' % name, 'ms', mean_latency, |
| + none_value_reason=latency_none_value_reason), |
| + scalar.ScalarValue( |
| + page, '%s_discrepancy' % name, 'ms', |
| + latency_discrepancy, none_value_reason=latency_none_value_reason) |
| + ] |
| - # Absolute discrepancy of frame time stamps. |
| - frame_discrepancy = statistics.TimestampsDiscrepancy( |
| - stats.frame_timestamps) |
| - results.AddValue(scalar.ScalarValue( |
| - results.current_page, 'jank', 'ms', round(frame_discrepancy, 4), |
| + def GetQueueingDurationValue(self, page, list_of_queueing_durations_lists, |
|
chrishenry
2014/08/15 04:43:21
ComputeQueueingDuration
ariblue
2014/08/15 22:10:26
Done.
|
| + none_value_reason): |
| + """Add the frame queueing durations to the results.""" |
| + queueing_durations = None |
| + queueing_none_value_reason = none_value_reason |
| + if none_value_reason is None: |
| + queueing_durations = FlattenList(list_of_queueing_durations_lists) |
| + if len(queueing_durations) == 0: |
| + queueing_durations = None |
| + queueing_none_value_reason = 'Queueing delay metric is unsupported.' |
| + return list_of_scalar_values.ListOfScalarValues( |
| + page, 'queueing_durations', 'ms', queueing_durations, |
| + none_value_reason=queueing_none_value_reason) |
| + |
| + def GetFrameTimeMetricValues(self, page, list_of_frame_times_lists, |
| + none_value_reason): |
| + """Add the frame times metrics to the results. |
| + |
| + This includes the raw and mean frame times, as well as the mostly_smooth |
| + metric which tracks whether we hit 60 fps for 95% of the frames.""" |
| + frame_times = None |
| + mean_frame_time = None |
| + mostly_smooth = None |
| + if none_value_reason is None: |
| + frame_times = FlattenList(list_of_frame_times_lists) |
| + mean_frame_time = round(statistics.ArithmeticMean(frame_times), 3) |
| + # We use 19ms as a somewhat looser threshold, instead of 1000.0/60.0. |
| + percentile_95 = statistics.Percentile(frame_times, 95.0) |
| + mostly_smooth = 1.0 if percentile_95 < 19.0 else 0.0 |
| + return [ |
| + list_of_scalar_values.ListOfScalarValues( |
| + page, 'frame_times', 'ms', frame_times, |
| + description='List of raw frame times, helpful to understand the ' |
| + 'other metrics.', |
| + none_value_reason=none_value_reason), |
| + scalar.ScalarValue( |
| + page, 'mean_frame_time', 'ms', mean_frame_time, |
| + description='Arithmetic mean of frame times.', |
| + none_value_reason=none_value_reason), |
| + scalar.ScalarValue( |
| + page, 'mostly_smooth', 'score', mostly_smooth, |
| + description='Were 95 percent of the frames hitting 60 fps? ' |
| + 'boolean value (1/0).', |
| + none_value_reason=none_value_reason) |
| + ] |
| + |
| + def GetFrameTimeDiscrepancyValue(self, page, list_of_frame_timestamp_lists, |
| + none_value_reason): |
| + """Add the absolute discrepancy of frame time stamps to the results.""" |
| + frame_discrepancy = None |
| + if none_value_reason is None: |
| + frame_discrepancy = round(statistics.TimestampsDiscrepancy( |
| + list_of_frame_timestamp_lists), 4) |
| + return scalar.ScalarValue( |
| + page, 'jank', 'ms', frame_discrepancy, |
| description='Absolute discrepancy of frame time stamps, where ' |
| 'discrepancy is a measure of irregularity. It quantifies ' |
| 'the worst jank. For a single pause, discrepancy ' |
| @@ -79,22 +145,22 @@ class SmoothnessMetric(timeline_based_metric.TimelineBasedMetric): |
| 'Consecutive pauses increase the discrepancy. This metric ' |
| 'is important because even if the mean and 95th ' |
| 'percentile are good, one long pause in the middle of an ' |
| - 'interaction is still bad.')) |
| + 'interaction is still bad.', |
| + none_value_reason=none_value_reason) |
| - # Are we hitting 60 fps for 95 percent of all frames? |
| - # We use 19ms as a somewhat looser threshold, instead of 1000.0/60.0. |
| - percentile_95 = statistics.Percentile(frame_times, 95.0) |
| - results.AddValue(scalar.ScalarValue( |
| - results.current_page, 'mostly_smooth', 'score', |
| - 1.0 if percentile_95 < 19.0 else 0.0, |
| - description='Were 95 percent of the frames hitting 60 fps?' |
| - 'boolean value (1/0).')) |
| + def GetMeanPixelsApproximatedValue(self, page, |
| + list_of_pixel_percentages_lists, |
| + none_value_reason): |
| + """Add the mean percentage of pixels approximated. |
| - # Mean percentage of pixels approximated (missing tiles, low resolution |
| - # tiles, non-ideal resolution tiles). |
| - results.AddValue(scalar.ScalarValue( |
| - results.current_page, 'mean_pixels_approximated', 'percent', |
| - round(statistics.ArithmeticMean( |
| - FlattenList(stats.approximated_pixel_percentages)), 3), |
| + This looks at tiles which are missing or of low or non-ideal resolution. |
| + """ |
| + mean_pixels_approximated = None |
| + if none_value_reason is None: |
| + mean_pixels_approximated = round(statistics.ArithmeticMean( |
| + FlattenList(list_of_pixel_percentages_lists)), 3) |
| + return scalar.ScalarValue( |
| + page, 'mean_pixels_approximated', 'percent', mean_pixels_approximated, |
| description='Percentage of pixels that were approximated ' |
| - '(checkerboarding, low-resolution tiles, etc.).')) |
| + '(checkerboarding, low-resolution tiles, etc.).', |
| + none_value_reason=none_value_reason) |