Index: tools/perf/metrics/gpu_timeline.py
diff --git a/tools/perf/metrics/gpu_timeline.py b/tools/perf/metrics/gpu_timeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..84050336a1f255c8c9acbdf7ca359c1349859c3f
--- /dev/null
+++ b/tools/perf/metrics/gpu_timeline.py
@@ -0,0 +1,195 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import collections
+import math
+import sys
+
+from telemetry.timeline import model as model_module
+from telemetry.timeline import async_slice as async_slice_module
+from telemetry.value import scalar
+from telemetry.web_perf.metrics import timeline_based_metric
+
+TOPLEVEL_GL_CATEGORY = 'gpu_toplevel'
+TOPLEVEL_SERVICE_CATEGORY = 'disabled-by-default-gpu.service'
+TOPLEVEL_DEVICE_CATEGORY = 'disabled-by-default-gpu.device'
+
+SERVICE_FRAME_END_MARKER = (TOPLEVEL_SERVICE_CATEGORY, 'SwapBuffer')
+DEVICE_FRAME_END_MARKER = (TOPLEVEL_DEVICE_CATEGORY, 'SwapBuffer')
+
+TRACKED_NAMES = { 'RenderCompositor': 'render_compositor',
+                  'Compositor': 'compositor' }
+
+GPU_SERVICE_DEVICE_VARIANCE = 5
+
+
+def GetTrackedServiceName(name):
+  return name + '_service-cpu'
+
+
+def GetTrackedDeviceName(name):
+  return name + '_device-gpu'
+
+
+def CalculateFrameTimes(events_per_frame):
+  """Given a list of events per frame, returns a list of frame times."""
+  times_per_frame = []
+  for event_list in events_per_frame:
+    # Prefer to use thread_duration but use duration as fallback.
+    event_times = [(event.thread_duration or event.duration)
+                   for event in event_list]
+    times_per_frame.append(sum(event_times))
+  return times_per_frame
+
+
+class GPUTimelineMetric(timeline_based_metric.TimelineBasedMetric):
+  """Computes GPU-based metrics."""
+
+  def __init__(self):
+    super(GPUTimelineMetric, self).__init__()
+
+  def AddResults(self, model, _, interaction_records, results):
+    service_times = self._CalculateGPUTimelineData(model)
+    for name, durations in service_times.iteritems():
+      count = len(durations)
+      avg = 0.0
+      stddev = 0.0
+      maximum = 0.0
+      if count:
+        avg = sum(durations) / count
+        stddev = math.sqrt(sum((d - avg) ** 2 for d in durations) / count)
+        maximum = max(durations)
+
+      results.AddValue(scalar.ScalarValue(results.current_page,
+                                          name + '_max', 'ms', maximum))
+      results.AddValue(scalar.ScalarValue(results.current_page,
+                                          name + '_avg', 'ms', avg))
+      results.AddValue(scalar.ScalarValue(results.current_page,
+                                          name + '_stddev', 'ms', stddev))
+
+  def _CalculateGPUTimelineData(self, model):
+    """Uses the model to calculate the per-frame times of various events.
+    The return value will be a dictionary of the following format:
+      {
+        EVENT_NAME1: [FRAME0_TIME, FRAME1_TIME...etc.],
+        EVENT_NAME2: [FRAME0_TIME, FRAME1_TIME...etc.],
+      }
+
+    Event Names:
+      mean_frame: Elapsed time of each frame, measured on the
+                  service (CPU) side.
+      mean_gpu_service-cpu: Time the GPU service (CPU side) took per frame.
+      mean_gpu_device-gpu: Time the GPU device took per frame.
+      <TRACKED_NAME>_service-cpu: Per-frame service (CPU) time for each
+                                  tracked name in the TRACKED_NAMES
+                                  dictionary.
+      <TRACKED_NAME>_device-gpu: Per-frame device (GPU) time for each
+                                 tracked name in the TRACKED_NAMES dictionary.
+    """
+    all_service_events = []
+    current_service_frame_end = sys.maxint
+    current_service_events = []
+
+    all_device_events = []
+    current_device_frame_end = sys.maxint
+    current_device_events = []
+
+    tracked_events = {}
+    tracked_events.update(dict([(GetTrackedServiceName(value), [])
+                                for value in TRACKED_NAMES.itervalues()]))
+    tracked_events.update(dict([(GetTrackedDeviceName(value), [])
+                                for value in TRACKED_NAMES.itervalues()]))
+
+    current_tracked_service_events = collections.defaultdict(list)
+    current_tracked_device_events = collections.defaultdict(list)
+
+    event_iter = model.IterAllEvents(
+        event_type_predicate=model_module.IsSliceOrAsyncSlice)
+    for event in event_iter:
+      # Look for frame end markers.
+      if (event.category, event.name) == SERVICE_FRAME_END_MARKER:
+        current_service_frame_end = event.end
+      elif (event.category, event.name) == DEVICE_FRAME_END_MARKER:
+        current_device_frame_end = event.end
+
+      # Track all other toplevel gl category markers.
+      elif event.args.get('gl_category', None) == TOPLEVEL_GL_CATEGORY:
+        # Strip any '-<suffix>' so the name can match a TRACKED_NAMES key.
+        base_name = event.name
+        dash_index = base_name.rfind('-')
+        if dash_index != -1:
+          base_name = base_name[:dash_index]
+        tracked_name = TRACKED_NAMES.get(base_name, None)
+
+        if event.category == TOPLEVEL_SERVICE_CATEGORY:
+          # Check if frame has ended.
+          if event.start >= current_service_frame_end:
+            if current_service_events:
+              all_service_events.append(current_service_events)
+              for value in TRACKED_NAMES.itervalues():
+                tracked_events[GetTrackedServiceName(value)].append(
+                  current_tracked_service_events[value])
+            current_service_events = []
+            current_service_frame_end = sys.maxint
+            current_tracked_service_events.clear()
+
+          current_service_events.append(event)
+          if tracked_name:
+            current_tracked_service_events[tracked_name].append(event)
+
+        elif event.category == TOPLEVEL_DEVICE_CATEGORY:
+          # Check if frame has ended.
+          if event.start >= current_device_frame_end:
+            if current_device_events:
+              all_device_events.append(current_device_events)
+              for value in TRACKED_NAMES.itervalues():
+                tracked_events[GetTrackedDeviceName(value)].append(
+                  current_tracked_device_events[value])
+            current_device_events = []
+            current_device_frame_end = sys.maxint
+            current_tracked_device_events.clear()
+
+          current_device_events.append(event)
+          if tracked_name:
+            current_tracked_device_events[tracked_name].append(event)
+
+    # Append data for the last frame.
+    if current_service_events:
+      all_service_events.append(current_service_events)
+      for value in TRACKED_NAMES.itervalues():
+        tracked_events[GetTrackedServiceName(value)].append(
+          current_tracked_service_events[value])
+    if current_device_events:
+      all_device_events.append(current_device_events)
+      for value in TRACKED_NAMES.itervalues():
+        tracked_events[GetTrackedDeviceName(value)].append(
+          current_tracked_device_events[value])
+
+    # Calculate the time of each frame on the CPU (service) side.
+    frame_times = []
+    if all_service_events:
+      prev_frame_end = all_service_events[0][0].start
+      for event_list in all_service_events:
+        last_service_event_in_frame = event_list[-1]
+        frame_times.append(last_service_event_in_frame.end - prev_frame_end)
+        prev_frame_end = last_service_event_in_frame.end
+
+    # Create the timeline data dictionary for service-side traces.
+    gpu_service_name = GetTrackedServiceName('mean_gpu')
+
+    timeline_data = {}
+    timeline_data['mean_frame'] = frame_times
+    timeline_data[gpu_service_name] = CalculateFrameTimes(all_service_events)
+    for value in TRACKED_NAMES.itervalues():
+      name = GetTrackedServiceName(value)
+      timeline_data[name] = CalculateFrameTimes(tracked_events[name])
+
+    # Add in GPU-side traces if supported (i.e. device traces exist).
+    if all_device_events:
+      gpu_device_name = GetTrackedDeviceName('mean_gpu')
+
+      timeline_data[gpu_device_name] = CalculateFrameTimes(all_device_events)
+      for value in TRACKED_NAMES.itervalues():
+        name = GetTrackedDeviceName(value)
+        timeline_data[name] = CalculateFrameTimes(tracked_events[name])
+
+    return timeline_data
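
A minimal sketch (illustration only, not part of the patch) of how CalculateFrameTimes treats its input, assuming the function above is in scope. FakeEvent is a made-up stand-in for Telemetry slice events: each inner list holds the slices attributed to one frame, and thread_duration is preferred with the wall-clock duration as the fallback.

  class FakeEvent(object):
    # Made-up stand-in exposing only the attributes CalculateFrameTimes reads.
    def __init__(self, start, end, thread_duration=None):
      self.start = start
      self.end = end
      self.duration = end - start
      self.thread_duration = thread_duration

  # Frame 0: 3 ms (thread time) + 4 ms (wall-clock fallback); frame 1: 4 ms.
  frames = [[FakeEvent(0, 5, thread_duration=3), FakeEvent(5, 9)],
            [FakeEvent(16, 20, thread_duration=4)]]
  print CalculateFrameTimes(frames)  # [7, 4]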
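
The 'mean_frame' list holds the gap between successive frame ends on the service (CPU) side, seeded with the start of the very first service event. A simplified, self-contained restatement of that loop with made-up numbers:

  # Suppose the last service event of each frame ends at 16, 33 and 50 ms
  # and the first service event starts at 0 ms (numbers are illustrative).
  frame_ends = [16, 33, 50]
  prev_end = 0
  frame_times = []
  for end in frame_ends:
    frame_times.append(end - prev_end)
    prev_end = end
  print frame_times  # [16, 17, 17]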
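
AddResults then reduces each per-frame list to three scalars named <name>_max, <name>_avg and <name>_stddev, all in ms; the stddev is the population standard deviation. With made-up durations:

  import math

  durations = [5.0, 7.0, 9.0]  # e.g. timeline_data['mean_gpu_service-cpu']
  count = len(durations)
  avg = sum(durations) / count                                        # 7.0
  stddev = math.sqrt(sum((d - avg) ** 2 for d in durations) / count)  # ~1.63
  maximum = max(durations)                                            # 9.0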